blob: eadb91e1076beea8cfcdd136cdc711c4d1cf5cf0 [file] [log] [blame]
Lokesh Vutla32cd2512018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <mailbox.h>
14#include <dm/device.h>
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053015#include <linux/compat.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053016#include <linux/err.h>
17#include <linux/soc/ti/k3-sec-proxy.h>
18#include <linux/soc/ti/ti_sci_protocol.h>
19
20#include "ti_sci.h"
21
22/* List of all TI SCI devices active in system */
23static LIST_HEAD(ti_sci_list);
24
25/**
26 * struct ti_sci_xfer - Structure representing a message flow
27 * @tx_message: Transmit message
28 * @rx_len: Receive message length
29 */
30struct ti_sci_xfer {
31 struct k3_sec_proxy_msg tx_message;
32 u8 rx_len;
33};
34
35/**
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053036 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
37 * management representation of dev_ids.
38 * @dev_id: TISCI device ID
39 * @type: Corresponding id as identified by TISCI RM.
40 *
41 * Note: This is used only as a work around for using RM range apis
42 * for AM654 SoC. For future SoCs dev_id will be used as type
43 * for RM range APIs. In order to maintain ABI backward compatibility
44 * type is not being changed for AM654 SoC.
45 */
46struct ti_sci_rm_type_map {
47 u32 dev_id;
48 u16 type;
49};
50
51/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +053052 * struct ti_sci_desc - Description of SoC integration
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053053 * @default_host_id: Host identifier representing the compute entity
54 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
55 * @max_msgs: Maximum number of messages that can be pending
56 * simultaneously in the system
57 * @max_msg_size: Maximum size of data per message that can be handled.
58 * @rm_type_map: RM resource type mapping structure.
Lokesh Vutla32cd2512018-08-27 15:57:32 +053059 */
60struct ti_sci_desc {
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053061 u8 default_host_id;
62 int max_rx_timeout_ms;
63 int max_msgs;
Lokesh Vutla32cd2512018-08-27 15:57:32 +053064 int max_msg_size;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053065 struct ti_sci_rm_type_map *rm_type_map;
Lokesh Vutla32cd2512018-08-27 15:57:32 +053066};
67
68/**
69 * struct ti_sci_info - Structure representing a TI SCI instance
70 * @dev: Device pointer
71 * @desc: SoC description for this instance
72 * @handle: Instance of TI SCI handle to send to clients.
73 * @chan_tx: Transmit mailbox channel
74 * @chan_rx: Receive mailbox channel
75 * @xfer: xfer info
76 * @list: list head
77 * @is_secure: Determines if the communication is through secure threads.
78 * @host_id: Host identifier representing the compute entity
79 * @seq: Seq id used for verification for tx and rx message.
80 */
81struct ti_sci_info {
82 struct udevice *dev;
83 const struct ti_sci_desc *desc;
84 struct ti_sci_handle handle;
85 struct mbox_chan chan_tx;
86 struct mbox_chan chan_rx;
87 struct mbox_chan chan_notify;
88 struct ti_sci_xfer xfer;
89 struct list_head list;
90 bool is_secure;
91 u8 host_id;
92 u8 seq;
93};
94
/* Map a client-visible ti_sci_handle back to its enclosing ti_sci_info */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
96
97/**
98 * ti_sci_setup_one_xfer() - Setup one message type
99 * @info: Pointer to SCI entity information
100 * @msg_type: Message type
101 * @msg_flags: Flag to set for the message
102 * @buf: Buffer to be send to mailbox channel
103 * @tx_message_size: transmit message size
104 * @rx_message_size: receive message size
105 *
106 * Helper function which is used by various command functions that are
107 * exposed to clients of this driver for allocating a message traffic event.
108 *
109 * Return: Corresponding ti_sci_xfer pointer if all went fine,
110 * else appropriate error pointer.
111 */
112static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
113 u16 msg_type, u32 msg_flags,
114 u32 *buf,
115 size_t tx_message_size,
116 size_t rx_message_size)
117{
118 struct ti_sci_xfer *xfer = &info->xfer;
119 struct ti_sci_msg_hdr *hdr;
120
121 /* Ensure we have sane transfer sizes */
122 if (rx_message_size > info->desc->max_msg_size ||
123 tx_message_size > info->desc->max_msg_size ||
124 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
125 return ERR_PTR(-ERANGE);
126
127 info->seq = ~info->seq;
128 xfer->tx_message.buf = buf;
129 xfer->tx_message.len = tx_message_size;
130 xfer->rx_len = (u8)rx_message_size;
131
132 hdr = (struct ti_sci_msg_hdr *)buf;
133 hdr->seq = info->seq;
134 hdr->type = msg_type;
135 hdr->host = info->host_id;
136 hdr->flags = msg_flags;
137
138 return xfer;
139}
140
141/**
142 * ti_sci_get_response() - Receive response from mailbox channel
143 * @info: Pointer to SCI entity information
144 * @xfer: Transfer to initiate and wait for response
145 * @chan: Channel to receive the response
146 *
147 * Return: -ETIMEDOUT in case of no response, if transmit error,
148 * return corresponding error, else if all goes well,
149 * return 0.
150 */
151static inline int ti_sci_get_response(struct ti_sci_info *info,
152 struct ti_sci_xfer *xfer,
153 struct mbox_chan *chan)
154{
155 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
156 struct ti_sci_secure_msg_hdr *secure_hdr;
157 struct ti_sci_msg_hdr *hdr;
158 int ret;
159
160 /* Receive the response */
Andreas Dannenberg32aebcf2019-04-24 14:20:08 -0500161 ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530162 if (ret) {
163 dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
164 __func__, ret);
165 return ret;
166 }
167
168 /* ToDo: Verify checksum */
169 if (info->is_secure) {
170 secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
171 msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
172 }
173
174 /* msg is updated by mailbox driver */
175 hdr = (struct ti_sci_msg_hdr *)msg->buf;
176
177 /* Sanity check for message response */
178 if (hdr->seq != info->seq) {
179 dev_dbg(info->dev, "%s: Message for %d is not expected\n",
180 __func__, hdr->seq);
181 return ret;
182 }
183
184 if (msg->len > info->desc->max_msg_size) {
185 dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
186 __func__, msg->len, info->desc->max_msg_size);
187 return -EINVAL;
188 }
189
190 if (msg->len < xfer->rx_len) {
191 dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
192 __func__, msg->len, xfer->rx_len);
193 }
194
195 return ret;
196}
197
198/**
199 * ti_sci_do_xfer() - Do one transfer
200 * @info: Pointer to SCI entity information
201 * @xfer: Transfer to initiate and wait for response
202 *
203 * Return: 0 if all went fine, else return appropriate error.
204 */
205static inline int ti_sci_do_xfer(struct ti_sci_info *info,
206 struct ti_sci_xfer *xfer)
207{
208 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
209 u8 secure_buf[info->desc->max_msg_size];
210 struct ti_sci_secure_msg_hdr secure_hdr;
211 int ret;
212
213 if (info->is_secure) {
214 /* ToDo: get checksum of the entire message */
215 secure_hdr.checksum = 0;
216 secure_hdr.reserved = 0;
217 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
218 xfer->tx_message.len);
219
220 xfer->tx_message.buf = (u32 *)secure_buf;
221 xfer->tx_message.len += sizeof(secure_hdr);
222 xfer->rx_len += sizeof(secure_hdr);
223 }
224
225 /* Send the message */
226 ret = mbox_send(&info->chan_tx, msg);
227 if (ret) {
228 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
229 __func__, ret);
230 return ret;
231 }
232
233 return ti_sci_get_response(info, xfer, &info->chan_rx);
234}
235
236/**
237 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
238 * @handle: pointer to TI SCI handle
239 *
240 * Updates the SCI information in the internal data structure.
241 *
242 * Return: 0 if all went fine, else return appropriate error.
243 */
244static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
245{
246 struct ti_sci_msg_resp_version *rev_info;
247 struct ti_sci_version_info *ver;
248 struct ti_sci_msg_hdr hdr;
249 struct ti_sci_info *info;
250 struct ti_sci_xfer *xfer;
251 int ret;
252
253 if (IS_ERR(handle))
254 return PTR_ERR(handle);
255 if (!handle)
256 return -EINVAL;
257
258 info = handle_to_ti_sci_info(handle);
259
Andrew F. Davisefbfd442019-04-29 09:04:11 -0400260 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
261 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530262 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
263 sizeof(*rev_info));
264 if (IS_ERR(xfer)) {
265 ret = PTR_ERR(xfer);
266 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
267 return ret;
268 }
269
270 ret = ti_sci_do_xfer(info, xfer);
271 if (ret) {
272 dev_err(info->dev, "Mbox communication fail %d\n", ret);
273 return ret;
274 }
275
276 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
277
278 ver = &handle->version;
279 ver->abi_major = rev_info->abi_major;
280 ver->abi_minor = rev_info->abi_minor;
281 ver->firmware_revision = rev_info->firmware_revision;
282 strncpy(ver->firmware_description, rev_info->firmware_description,
283 sizeof(ver->firmware_description));
284
285 return 0;
286}
287
288/**
289 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
290 * @r: pointer to response buffer
291 *
292 * Return: true if the response was an ACK, else returns false.
293 */
294static inline bool ti_sci_is_response_ack(void *r)
295{
296 struct ti_sci_msg_hdr *hdr = r;
297
298 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
299}
300
301/**
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +0530302 * cmd_set_board_config_using_msg() - Common command to send board configuration
303 * message
304 * @handle: pointer to TI SCI handle
305 * @msg_type: One of the TISCI message types to set board configuration
306 * @addr: Address where the board config structure is located
307 * @size: Size of the board config structure
308 *
309 * Return: 0 if all went well, else returns appropriate error value.
310 */
311static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
312 u16 msg_type, u64 addr, u32 size)
313{
314 struct ti_sci_msg_board_config req;
315 struct ti_sci_msg_hdr *resp;
316 struct ti_sci_info *info;
317 struct ti_sci_xfer *xfer;
318 int ret = 0;
319
320 if (IS_ERR(handle))
321 return PTR_ERR(handle);
322 if (!handle)
323 return -EINVAL;
324
325 info = handle_to_ti_sci_info(handle);
326
327 xfer = ti_sci_setup_one_xfer(info, msg_type,
328 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
329 (u32 *)&req, sizeof(req), sizeof(*resp));
330 if (IS_ERR(xfer)) {
331 ret = PTR_ERR(xfer);
332 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
333 return ret;
334 }
335 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
336 req.boardcfgp_low = addr & 0xffffffff;
337 req.boardcfg_size = size;
338
339 ret = ti_sci_do_xfer(info, xfer);
340 if (ret) {
341 dev_err(info->dev, "Mbox send fail %d\n", ret);
342 return ret;
343 }
344
345 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
346
347 if (!ti_sci_is_response_ack(resp))
348 return -ENODEV;
349
350 return ret;
351}
352
353/**
354 * ti_sci_cmd_set_board_config() - Command to send board configuration message
355 * @handle: pointer to TI SCI handle
356 * @addr: Address where the board config structure is located
357 * @size: Size of the board config structure
358 *
359 * Return: 0 if all went well, else returns appropriate error value.
360 */
361static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
362 u64 addr, u32 size)
363{
364 return cmd_set_board_config_using_msg(handle,
365 TI_SCI_MSG_BOARD_CONFIG,
366 addr, size);
367}
368
369/**
370 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
371 * management configuration
372 * @handle: pointer to TI SCI handle
373 * @addr: Address where the board RM config structure is located
374 * @size: Size of the RM config structure
375 *
376 * Return: 0 if all went well, else returns appropriate error value.
377 */
378static
379int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
380 u64 addr, u32 size)
381{
382 return cmd_set_board_config_using_msg(handle,
383 TI_SCI_MSG_BOARD_CONFIG_RM,
384 addr, size);
385}
386
387/**
388 * ti_sci_cmd_set_board_config_security() - Command to send board security
389 * configuration message
390 * @handle: pointer to TI SCI handle
391 * @addr: Address where the board security config structure is located
392 * @size: Size of the security config structure
393 *
394 * Return: 0 if all went well, else returns appropriate error value.
395 */
396static
397int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
398 u64 addr, u32 size)
399{
400 return cmd_set_board_config_using_msg(handle,
401 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
402 addr, size);
403}
404
405/**
406 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
407 * configuration message
408 * @handle: pointer to TI SCI handle
409 * @addr: Address where the board PM config structure is located
410 * @size: Size of the PM config structure
411 *
412 * Return: 0 if all went well, else returns appropriate error value.
413 */
414static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
415 u64 addr, u32 size)
416{
417 return cmd_set_board_config_using_msg(handle,
418 TI_SCI_MSG_BOARD_CONFIG_PM,
419 addr, size);
420}
421
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530422/**
423 * ti_sci_set_device_state() - Set device state helper
424 * @handle: pointer to TI SCI handle
425 * @id: Device identifier
426 * @flags: flags to setup for the device
427 * @state: State to move the device to
428 *
429 * Return: 0 if all went well, else returns appropriate error value.
430 */
431static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
432 u32 id, u32 flags, u8 state)
433{
434 struct ti_sci_msg_req_set_device_state req;
435 struct ti_sci_msg_hdr *resp;
436 struct ti_sci_info *info;
437 struct ti_sci_xfer *xfer;
438 int ret = 0;
439
440 if (IS_ERR(handle))
441 return PTR_ERR(handle);
442 if (!handle)
443 return -EINVAL;
444
445 info = handle_to_ti_sci_info(handle);
446
447 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
448 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
449 (u32 *)&req, sizeof(req), sizeof(*resp));
450 if (IS_ERR(xfer)) {
451 ret = PTR_ERR(xfer);
452 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
453 return ret;
454 }
455 req.id = id;
456 req.state = state;
457
458 ret = ti_sci_do_xfer(info, xfer);
459 if (ret) {
460 dev_err(info->dev, "Mbox send fail %d\n", ret);
461 return ret;
462 }
463
464 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
465
466 if (!ti_sci_is_response_ack(resp))
467 return -ENODEV;
468
469 return ret;
470}
471
472/**
473 * ti_sci_get_device_state() - Get device state helper
474 * @handle: Handle to the device
475 * @id: Device Identifier
476 * @clcnt: Pointer to Context Loss Count
477 * @resets: pointer to resets
478 * @p_state: pointer to p_state
479 * @c_state: pointer to c_state
480 *
481 * Return: 0 if all went fine, else return appropriate error.
482 */
483static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
484 u32 id, u32 *clcnt, u32 *resets,
485 u8 *p_state, u8 *c_state)
486{
487 struct ti_sci_msg_resp_get_device_state *resp;
488 struct ti_sci_msg_req_get_device_state req;
489 struct ti_sci_info *info;
490 struct ti_sci_xfer *xfer;
491 int ret = 0;
492
493 if (IS_ERR(handle))
494 return PTR_ERR(handle);
495 if (!handle)
496 return -EINVAL;
497
498 if (!clcnt && !resets && !p_state && !c_state)
499 return -EINVAL;
500
501 info = handle_to_ti_sci_info(handle);
502
Andrew F. Davisefbfd442019-04-29 09:04:11 -0400503 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
504 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530505 (u32 *)&req, sizeof(req), sizeof(*resp));
506 if (IS_ERR(xfer)) {
507 ret = PTR_ERR(xfer);
508 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
509 return ret;
510 }
511 req.id = id;
512
513 ret = ti_sci_do_xfer(info, xfer);
514 if (ret) {
515 dev_err(dev, "Mbox send fail %d\n", ret);
516 return ret;
517 }
518
519 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
520 if (!ti_sci_is_response_ack(resp))
521 return -ENODEV;
522
523 if (clcnt)
524 *clcnt = resp->context_loss_count;
525 if (resets)
526 *resets = resp->resets;
527 if (p_state)
528 *p_state = resp->programmed_state;
529 if (c_state)
530 *c_state = resp->current_state;
531
532 return ret;
533}
534
535/**
536 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
537 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
538 * @id: Device Identifier
539 *
540 * Request for the device - NOTE: the client MUST maintain integrity of
541 * usage count by balancing get_device with put_device. No refcounting is
542 * managed by driver for that purpose.
543 *
544 * NOTE: The request is for exclusive access for the processor.
545 *
546 * Return: 0 if all went fine, else return appropriate error.
547 */
548static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
549{
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530550 return ti_sci_set_device_state(handle, id, 0,
551 MSG_DEVICE_SW_STATE_ON);
552}
553
554static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
555 u32 id)
556{
557 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530558 MSG_DEVICE_SW_STATE_ON);
559}
560
561/**
562 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
563 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
564 * @id: Device Identifier
565 *
566 * Request for the device - NOTE: the client MUST maintain integrity of
567 * usage count by balancing get_device with put_device. No refcounting is
568 * managed by driver for that purpose.
569 *
570 * Return: 0 if all went fine, else return appropriate error.
571 */
572static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
573{
574 return ti_sci_set_device_state(handle, id,
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530575 0,
576 MSG_DEVICE_SW_STATE_RETENTION);
577}
578
579static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
580 u32 id)
581{
582 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530583 MSG_DEVICE_SW_STATE_RETENTION);
584}
585
586/**
587 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
588 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
589 * @id: Device Identifier
590 *
591 * Request for the device - NOTE: the client MUST maintain integrity of
592 * usage count by balancing get_device with put_device. No refcounting is
593 * managed by driver for that purpose.
594 *
595 * Return: 0 if all went fine, else return appropriate error.
596 */
597static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
598{
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530599 return ti_sci_set_device_state(handle, id, 0,
600 MSG_DEVICE_SW_STATE_AUTO_OFF);
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530601}
602
603/**
604 * ti_sci_cmd_dev_is_valid() - Is the device valid
605 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
606 * @id: Device Identifier
607 *
608 * Return: 0 if all went fine and the device ID is valid, else return
609 * appropriate error.
610 */
611static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
612{
613 u8 unused;
614
615 /* check the device state which will also tell us if the ID is valid */
616 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
617}
618
619/**
620 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
621 * @handle: Pointer to TISCI handle
622 * @id: Device Identifier
623 * @count: Pointer to Context Loss counter to populate
624 *
625 * Return: 0 if all went fine, else return appropriate error.
626 */
627static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
628 u32 *count)
629{
630 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
631}
632
633/**
634 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
635 * @handle: Pointer to TISCI handle
636 * @id: Device Identifier
637 * @r_state: true if requested to be idle
638 *
639 * Return: 0 if all went fine, else return appropriate error.
640 */
641static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
642 bool *r_state)
643{
644 int ret;
645 u8 state;
646
647 if (!r_state)
648 return -EINVAL;
649
650 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
651 if (ret)
652 return ret;
653
654 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
655
656 return 0;
657}
658
659/**
660 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
661 * @handle: Pointer to TISCI handle
662 * @id: Device Identifier
663 * @r_state: true if requested to be stopped
664 * @curr_state: true if currently stopped.
665 *
666 * Return: 0 if all went fine, else return appropriate error.
667 */
668static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
669 bool *r_state, bool *curr_state)
670{
671 int ret;
672 u8 p_state, c_state;
673
674 if (!r_state && !curr_state)
675 return -EINVAL;
676
677 ret =
678 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
679 if (ret)
680 return ret;
681
682 if (r_state)
683 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
684 if (curr_state)
685 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
686
687 return 0;
688}
689
690/**
691 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
692 * @handle: Pointer to TISCI handle
693 * @id: Device Identifier
694 * @r_state: true if requested to be ON
695 * @curr_state: true if currently ON and active
696 *
697 * Return: 0 if all went fine, else return appropriate error.
698 */
699static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
700 bool *r_state, bool *curr_state)
701{
702 int ret;
703 u8 p_state, c_state;
704
705 if (!r_state && !curr_state)
706 return -EINVAL;
707
708 ret =
709 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
710 if (ret)
711 return ret;
712
713 if (r_state)
714 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
715 if (curr_state)
716 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
717
718 return 0;
719}
720
721/**
722 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
723 * @handle: Pointer to TISCI handle
724 * @id: Device Identifier
725 * @curr_state: true if currently transitioning.
726 *
727 * Return: 0 if all went fine, else return appropriate error.
728 */
729static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
730 bool *curr_state)
731{
732 int ret;
733 u8 state;
734
735 if (!curr_state)
736 return -EINVAL;
737
738 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
739 if (ret)
740 return ret;
741
742 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
743
744 return 0;
745}
746
747/**
748 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
749 * by TISCI
750 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
751 * @id: Device Identifier
752 * @reset_state: Device specific reset bit field
753 *
754 * Return: 0 if all went fine, else return appropriate error.
755 */
756static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
757 u32 id, u32 reset_state)
758{
759 struct ti_sci_msg_req_set_device_resets req;
760 struct ti_sci_msg_hdr *resp;
761 struct ti_sci_info *info;
762 struct ti_sci_xfer *xfer;
763 int ret = 0;
764
765 if (IS_ERR(handle))
766 return PTR_ERR(handle);
767 if (!handle)
768 return -EINVAL;
769
770 info = handle_to_ti_sci_info(handle);
771
772 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
773 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
774 (u32 *)&req, sizeof(req), sizeof(*resp));
775 if (IS_ERR(xfer)) {
776 ret = PTR_ERR(xfer);
777 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
778 return ret;
779 }
780 req.id = id;
781 req.resets = reset_state;
782
783 ret = ti_sci_do_xfer(info, xfer);
784 if (ret) {
785 dev_err(info->dev, "Mbox send fail %d\n", ret);
786 return ret;
787 }
788
789 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
790
791 if (!ti_sci_is_response_ack(resp))
792 return -ENODEV;
793
794 return ret;
795}
796
797/**
798 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
799 * by TISCI
800 * @handle: Pointer to TISCI handle
801 * @id: Device Identifier
802 * @reset_state: Pointer to reset state to populate
803 *
804 * Return: 0 if all went fine, else return appropriate error.
805 */
806static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
807 u32 id, u32 *reset_state)
808{
809 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
810 NULL);
811}
812
Lokesh Vutla9b871812018-08-27 15:57:35 +0530813/**
814 * ti_sci_set_clock_state() - Set clock state helper
815 * @handle: pointer to TI SCI handle
816 * @dev_id: Device identifier this request is for
817 * @clk_id: Clock identifier for the device for this request.
818 * Each device has it's own set of clock inputs. This indexes
819 * which clock input to modify.
820 * @flags: Header flags as needed
821 * @state: State to request for the clock.
822 *
823 * Return: 0 if all went well, else returns appropriate error value.
824 */
825static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
826 u32 dev_id, u8 clk_id,
827 u32 flags, u8 state)
828{
829 struct ti_sci_msg_req_set_clock_state req;
830 struct ti_sci_msg_hdr *resp;
831 struct ti_sci_info *info;
832 struct ti_sci_xfer *xfer;
833 int ret = 0;
834
835 if (IS_ERR(handle))
836 return PTR_ERR(handle);
837 if (!handle)
838 return -EINVAL;
839
840 info = handle_to_ti_sci_info(handle);
841
842 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
843 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
844 (u32 *)&req, sizeof(req), sizeof(*resp));
845 if (IS_ERR(xfer)) {
846 ret = PTR_ERR(xfer);
847 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
848 return ret;
849 }
850 req.dev_id = dev_id;
851 req.clk_id = clk_id;
852 req.request_state = state;
853
854 ret = ti_sci_do_xfer(info, xfer);
855 if (ret) {
856 dev_err(info->dev, "Mbox send fail %d\n", ret);
857 return ret;
858 }
859
860 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
861
862 if (!ti_sci_is_response_ack(resp))
863 return -ENODEV;
864
865 return ret;
866}
867
868/**
869 * ti_sci_cmd_get_clock_state() - Get clock state helper
870 * @handle: pointer to TI SCI handle
871 * @dev_id: Device identifier this request is for
872 * @clk_id: Clock identifier for the device for this request.
873 * Each device has it's own set of clock inputs. This indexes
874 * which clock input to modify.
875 * @programmed_state: State requested for clock to move to
876 * @current_state: State that the clock is currently in
877 *
878 * Return: 0 if all went well, else returns appropriate error value.
879 */
880static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
881 u32 dev_id, u8 clk_id,
882 u8 *programmed_state, u8 *current_state)
883{
884 struct ti_sci_msg_resp_get_clock_state *resp;
885 struct ti_sci_msg_req_get_clock_state req;
886 struct ti_sci_info *info;
887 struct ti_sci_xfer *xfer;
888 int ret = 0;
889
890 if (IS_ERR(handle))
891 return PTR_ERR(handle);
892 if (!handle)
893 return -EINVAL;
894
895 if (!programmed_state && !current_state)
896 return -EINVAL;
897
898 info = handle_to_ti_sci_info(handle);
899
900 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
901 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
902 (u32 *)&req, sizeof(req), sizeof(*resp));
903 if (IS_ERR(xfer)) {
904 ret = PTR_ERR(xfer);
905 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
906 return ret;
907 }
908 req.dev_id = dev_id;
909 req.clk_id = clk_id;
910
911 ret = ti_sci_do_xfer(info, xfer);
912 if (ret) {
913 dev_err(info->dev, "Mbox send fail %d\n", ret);
914 return ret;
915 }
916
917 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
918
919 if (!ti_sci_is_response_ack(resp))
920 return -ENODEV;
921
922 if (programmed_state)
923 *programmed_state = resp->programmed_state;
924 if (current_state)
925 *current_state = resp->current_state;
926
927 return ret;
928}
929
930/**
931 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
932 * @handle: pointer to TI SCI handle
933 * @dev_id: Device identifier this request is for
934 * @clk_id: Clock identifier for the device for this request.
935 * Each device has it's own set of clock inputs. This indexes
936 * which clock input to modify.
937 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
938 * @can_change_freq: 'true' if frequency change is desired, else 'false'
939 * @enable_input_term: 'true' if input termination is desired, else 'false'
940 *
941 * Return: 0 if all went well, else returns appropriate error value.
942 */
943static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
944 u8 clk_id, bool needs_ssc, bool can_change_freq,
945 bool enable_input_term)
946{
947 u32 flags = 0;
948
949 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
950 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
951 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
952
953 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
954 MSG_CLOCK_SW_STATE_REQ);
955}
956
957/**
958 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
959 * @handle: pointer to TI SCI handle
960 * @dev_id: Device identifier this request is for
961 * @clk_id: Clock identifier for the device for this request.
962 * Each device has it's own set of clock inputs. This indexes
963 * which clock input to modify.
964 *
965 * NOTE: This clock must have been requested by get_clock previously.
966 *
967 * Return: 0 if all went well, else returns appropriate error value.
968 */
969static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
970 u32 dev_id, u8 clk_id)
971{
972 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
973 MSG_CLOCK_SW_STATE_UNREQ);
974}
975
976/**
977 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
978 * @handle: pointer to TI SCI handle
979 * @dev_id: Device identifier this request is for
980 * @clk_id: Clock identifier for the device for this request.
981 * Each device has it's own set of clock inputs. This indexes
982 * which clock input to modify.
983 *
984 * NOTE: This clock must have been requested by get_clock previously.
985 *
986 * Return: 0 if all went well, else returns appropriate error value.
987 */
988static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
989 u32 dev_id, u8 clk_id)
990{
991 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
992 MSG_CLOCK_SW_STATE_AUTO);
993}
994
995/**
996 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
997 * @handle: pointer to TI SCI handle
998 * @dev_id: Device identifier this request is for
999 * @clk_id: Clock identifier for the device for this request.
1000 * Each device has it's own set of clock inputs. This indexes
1001 * which clock input to modify.
1002 * @req_state: state indicating if the clock is auto managed
1003 *
1004 * Return: 0 if all went well, else returns appropriate error value.
1005 */
1006static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1007 u32 dev_id, u8 clk_id, bool *req_state)
1008{
1009 u8 state = 0;
1010 int ret;
1011
1012 if (!req_state)
1013 return -EINVAL;
1014
1015 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1016 if (ret)
1017 return ret;
1018
1019 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1020 return 0;
1021}
1022
1023/**
1024 * ti_sci_cmd_clk_is_on() - Is the clock ON
1025 * @handle: pointer to TI SCI handle
1026 * @dev_id: Device identifier this request is for
1027 * @clk_id: Clock identifier for the device for this request.
1028 * Each device has it's own set of clock inputs. This indexes
1029 * which clock input to modify.
1030 * @req_state: state indicating if the clock is managed by us and enabled
1031 * @curr_state: state indicating if the clock is ready for operation
1032 *
1033 * Return: 0 if all went well, else returns appropriate error value.
1034 */
1035static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1036 u8 clk_id, bool *req_state, bool *curr_state)
1037{
1038 u8 c_state = 0, r_state = 0;
1039 int ret;
1040
1041 if (!req_state && !curr_state)
1042 return -EINVAL;
1043
1044 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1045 &r_state, &c_state);
1046 if (ret)
1047 return ret;
1048
1049 if (req_state)
1050 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1051 if (curr_state)
1052 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1053 return 0;
1054}
1055
1056/**
1057 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1058 * @handle: pointer to TI SCI handle
1059 * @dev_id: Device identifier this request is for
1060 * @clk_id: Clock identifier for the device for this request.
1061 * Each device has it's own set of clock inputs. This indexes
1062 * which clock input to modify.
1063 * @req_state: state indicating if the clock is managed by us and disabled
1064 * @curr_state: state indicating if the clock is NOT ready for operation
1065 *
1066 * Return: 0 if all went well, else returns appropriate error value.
1067 */
1068static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1069 u8 clk_id, bool *req_state, bool *curr_state)
1070{
1071 u8 c_state = 0, r_state = 0;
1072 int ret;
1073
1074 if (!req_state && !curr_state)
1075 return -EINVAL;
1076
1077 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1078 &r_state, &c_state);
1079 if (ret)
1080 return ret;
1081
1082 if (req_state)
1083 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1084 if (curr_state)
1085 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1086 return 0;
1087}
1088
1089/**
1090 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1091 * @handle: pointer to TI SCI handle
1092 * @dev_id: Device identifier this request is for
1093 * @clk_id: Clock identifier for the device for this request.
1094 * Each device has it's own set of clock inputs. This indexes
1095 * which clock input to modify.
1096 * @parent_id: Parent clock identifier to set
1097 *
1098 * Return: 0 if all went well, else returns appropriate error value.
1099 */
1100static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1101 u32 dev_id, u8 clk_id, u8 parent_id)
1102{
1103 struct ti_sci_msg_req_set_clock_parent req;
1104 struct ti_sci_msg_hdr *resp;
1105 struct ti_sci_info *info;
1106 struct ti_sci_xfer *xfer;
1107 int ret = 0;
1108
1109 if (IS_ERR(handle))
1110 return PTR_ERR(handle);
1111 if (!handle)
1112 return -EINVAL;
1113
1114 info = handle_to_ti_sci_info(handle);
1115
1116 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1117 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1118 (u32 *)&req, sizeof(req), sizeof(*resp));
1119 if (IS_ERR(xfer)) {
1120 ret = PTR_ERR(xfer);
1121 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1122 return ret;
1123 }
1124 req.dev_id = dev_id;
1125 req.clk_id = clk_id;
1126 req.parent_id = parent_id;
1127
1128 ret = ti_sci_do_xfer(info, xfer);
1129 if (ret) {
1130 dev_err(info->dev, "Mbox send fail %d\n", ret);
1131 return ret;
1132 }
1133
1134 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1135
1136 if (!ti_sci_is_response_ack(resp))
1137 return -ENODEV;
1138
1139 return ret;
1140}
1141
1142/**
1143 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1144 * @handle: pointer to TI SCI handle
1145 * @dev_id: Device identifier this request is for
1146 * @clk_id: Clock identifier for the device for this request.
1147 * Each device has it's own set of clock inputs. This indexes
1148 * which clock input to modify.
1149 * @parent_id: Current clock parent
1150 *
1151 * Return: 0 if all went well, else returns appropriate error value.
1152 */
1153static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1154 u32 dev_id, u8 clk_id, u8 *parent_id)
1155{
1156 struct ti_sci_msg_resp_get_clock_parent *resp;
1157 struct ti_sci_msg_req_get_clock_parent req;
1158 struct ti_sci_info *info;
1159 struct ti_sci_xfer *xfer;
1160 int ret = 0;
1161
1162 if (IS_ERR(handle))
1163 return PTR_ERR(handle);
1164 if (!handle || !parent_id)
1165 return -EINVAL;
1166
1167 info = handle_to_ti_sci_info(handle);
1168
1169 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1170 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1171 (u32 *)&req, sizeof(req), sizeof(*resp));
1172 if (IS_ERR(xfer)) {
1173 ret = PTR_ERR(xfer);
1174 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1175 return ret;
1176 }
1177 req.dev_id = dev_id;
1178 req.clk_id = clk_id;
1179
1180 ret = ti_sci_do_xfer(info, xfer);
1181 if (ret) {
1182 dev_err(info->dev, "Mbox send fail %d\n", ret);
1183 return ret;
1184 }
1185
1186 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1187
1188 if (!ti_sci_is_response_ack(resp))
1189 ret = -ENODEV;
1190 else
1191 *parent_id = resp->parent_id;
1192
1193 return ret;
1194}
1195
1196/**
1197 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1198 * @handle: pointer to TI SCI handle
1199 * @dev_id: Device identifier this request is for
1200 * @clk_id: Clock identifier for the device for this request.
1201 * Each device has it's own set of clock inputs. This indexes
1202 * which clock input to modify.
1203 * @num_parents: Returns he number of parents to the current clock.
1204 *
1205 * Return: 0 if all went well, else returns appropriate error value.
1206 */
1207static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1208 u32 dev_id, u8 clk_id,
1209 u8 *num_parents)
1210{
1211 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1212 struct ti_sci_msg_req_get_clock_num_parents req;
1213 struct ti_sci_info *info;
1214 struct ti_sci_xfer *xfer;
1215 int ret = 0;
1216
1217 if (IS_ERR(handle))
1218 return PTR_ERR(handle);
1219 if (!handle || !num_parents)
1220 return -EINVAL;
1221
1222 info = handle_to_ti_sci_info(handle);
1223
1224 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1225 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1226 (u32 *)&req, sizeof(req), sizeof(*resp));
1227 if (IS_ERR(xfer)) {
1228 ret = PTR_ERR(xfer);
1229 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1230 return ret;
1231 }
1232 req.dev_id = dev_id;
1233 req.clk_id = clk_id;
1234
1235 ret = ti_sci_do_xfer(info, xfer);
1236 if (ret) {
1237 dev_err(info->dev, "Mbox send fail %d\n", ret);
1238 return ret;
1239 }
1240
1241 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1242 xfer->tx_message.buf;
1243
1244 if (!ti_sci_is_response_ack(resp))
1245 ret = -ENODEV;
1246 else
1247 *num_parents = resp->num_parents;
1248
1249 return ret;
1250}
1251
1252/**
1253 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1254 * @handle: pointer to TI SCI handle
1255 * @dev_id: Device identifier this request is for
1256 * @clk_id: Clock identifier for the device for this request.
1257 * Each device has it's own set of clock inputs. This indexes
1258 * which clock input to modify.
1259 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1260 * allowable programmed frequency and does not account for clock
1261 * tolerances and jitter.
1262 * @target_freq: The target clock frequency in Hz. A frequency will be
1263 * processed as close to this target frequency as possible.
1264 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1265 * allowable programmed frequency and does not account for clock
1266 * tolerances and jitter.
1267 * @match_freq: Frequency match in Hz response.
1268 *
1269 * Return: 0 if all went well, else returns appropriate error value.
1270 */
1271static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1272 u32 dev_id, u8 clk_id, u64 min_freq,
1273 u64 target_freq, u64 max_freq,
1274 u64 *match_freq)
1275{
1276 struct ti_sci_msg_resp_query_clock_freq *resp;
1277 struct ti_sci_msg_req_query_clock_freq req;
1278 struct ti_sci_info *info;
1279 struct ti_sci_xfer *xfer;
1280 int ret = 0;
1281
1282 if (IS_ERR(handle))
1283 return PTR_ERR(handle);
1284 if (!handle || !match_freq)
1285 return -EINVAL;
1286
1287 info = handle_to_ti_sci_info(handle);
1288
1289 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1290 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1291 (u32 *)&req, sizeof(req), sizeof(*resp));
1292 if (IS_ERR(xfer)) {
1293 ret = PTR_ERR(xfer);
1294 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1295 return ret;
1296 }
1297 req.dev_id = dev_id;
1298 req.clk_id = clk_id;
1299 req.min_freq_hz = min_freq;
1300 req.target_freq_hz = target_freq;
1301 req.max_freq_hz = max_freq;
1302
1303 ret = ti_sci_do_xfer(info, xfer);
1304 if (ret) {
1305 dev_err(info->dev, "Mbox send fail %d\n", ret);
1306 return ret;
1307 }
1308
1309 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1310
1311 if (!ti_sci_is_response_ack(resp))
1312 ret = -ENODEV;
1313 else
1314 *match_freq = resp->freq_hz;
1315
1316 return ret;
1317}
1318
1319/**
1320 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1321 * @handle: pointer to TI SCI handle
1322 * @dev_id: Device identifier this request is for
1323 * @clk_id: Clock identifier for the device for this request.
1324 * Each device has it's own set of clock inputs. This indexes
1325 * which clock input to modify.
1326 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1327 * allowable programmed frequency and does not account for clock
1328 * tolerances and jitter.
1329 * @target_freq: The target clock frequency in Hz. A frequency will be
1330 * processed as close to this target frequency as possible.
1331 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1332 * allowable programmed frequency and does not account for clock
1333 * tolerances and jitter.
1334 *
1335 * Return: 0 if all went well, else returns appropriate error value.
1336 */
1337static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1338 u32 dev_id, u8 clk_id, u64 min_freq,
1339 u64 target_freq, u64 max_freq)
1340{
1341 struct ti_sci_msg_req_set_clock_freq req;
1342 struct ti_sci_msg_hdr *resp;
1343 struct ti_sci_info *info;
1344 struct ti_sci_xfer *xfer;
1345 int ret = 0;
1346
1347 if (IS_ERR(handle))
1348 return PTR_ERR(handle);
1349 if (!handle)
1350 return -EINVAL;
1351
1352 info = handle_to_ti_sci_info(handle);
1353
1354 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1355 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1356 (u32 *)&req, sizeof(req), sizeof(*resp));
1357 if (IS_ERR(xfer)) {
1358 ret = PTR_ERR(xfer);
1359 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1360 return ret;
1361 }
1362 req.dev_id = dev_id;
1363 req.clk_id = clk_id;
1364 req.min_freq_hz = min_freq;
1365 req.target_freq_hz = target_freq;
1366 req.max_freq_hz = max_freq;
1367
1368 ret = ti_sci_do_xfer(info, xfer);
1369 if (ret) {
1370 dev_err(info->dev, "Mbox send fail %d\n", ret);
1371 return ret;
1372 }
1373
1374 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1375
1376 if (!ti_sci_is_response_ack(resp))
1377 return -ENODEV;
1378
1379 return ret;
1380}
1381
1382/**
1383 * ti_sci_cmd_clk_get_freq() - Get current frequency
1384 * @handle: pointer to TI SCI handle
1385 * @dev_id: Device identifier this request is for
1386 * @clk_id: Clock identifier for the device for this request.
1387 * Each device has it's own set of clock inputs. This indexes
1388 * which clock input to modify.
1389 * @freq: Currently frequency in Hz
1390 *
1391 * Return: 0 if all went well, else returns appropriate error value.
1392 */
1393static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1394 u32 dev_id, u8 clk_id, u64 *freq)
1395{
1396 struct ti_sci_msg_resp_get_clock_freq *resp;
1397 struct ti_sci_msg_req_get_clock_freq req;
1398 struct ti_sci_info *info;
1399 struct ti_sci_xfer *xfer;
1400 int ret = 0;
1401
1402 if (IS_ERR(handle))
1403 return PTR_ERR(handle);
1404 if (!handle || !freq)
1405 return -EINVAL;
1406
1407 info = handle_to_ti_sci_info(handle);
1408
1409 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1410 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1411 (u32 *)&req, sizeof(req), sizeof(*resp));
1412 if (IS_ERR(xfer)) {
1413 ret = PTR_ERR(xfer);
1414 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1415 return ret;
1416 }
1417 req.dev_id = dev_id;
1418 req.clk_id = clk_id;
1419
1420 ret = ti_sci_do_xfer(info, xfer);
1421 if (ret) {
1422 dev_err(info->dev, "Mbox send fail %d\n", ret);
1423 return ret;
1424 }
1425
1426 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1427
1428 if (!ti_sci_is_response_ack(resp))
1429 ret = -ENODEV;
1430 else
1431 *freq = resp->freq_hz;
1432
1433 return ret;
1434}
1435
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301436/**
1437 * ti_sci_cmd_core_reboot() - Command to request system reset
1438 * @handle: pointer to TI SCI handle
1439 *
1440 * Return: 0 if all went well, else returns appropriate error value.
1441 */
1442static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1443{
1444 struct ti_sci_msg_req_reboot req;
1445 struct ti_sci_msg_hdr *resp;
1446 struct ti_sci_info *info;
1447 struct ti_sci_xfer *xfer;
1448 int ret = 0;
1449
1450 if (IS_ERR(handle))
1451 return PTR_ERR(handle);
1452 if (!handle)
1453 return -EINVAL;
1454
1455 info = handle_to_ti_sci_info(handle);
1456
1457 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1458 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1459 (u32 *)&req, sizeof(req), sizeof(*resp));
1460 if (IS_ERR(xfer)) {
1461 ret = PTR_ERR(xfer);
1462 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1463 return ret;
1464 }
1465
1466 ret = ti_sci_do_xfer(info, xfer);
1467 if (ret) {
1468 dev_err(dev, "Mbox send fail %d\n", ret);
1469 return ret;
1470 }
1471
1472 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1473
1474 if (!ti_sci_is_response_ack(resp))
1475 return -ENODEV;
1476
1477 return ret;
1478}
1479
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301480static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1481 u16 *type)
1482{
1483 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1484 bool found = false;
1485 int i;
1486
1487 /* If map is not provided then assume dev_id is used as type */
1488 if (!rm_type_map) {
1489 *type = dev_id;
1490 return 0;
1491 }
1492
1493 for (i = 0; rm_type_map[i].dev_id; i++) {
1494 if (rm_type_map[i].dev_id == dev_id) {
1495 *type = rm_type_map[i].type;
1496 found = true;
1497 break;
1498 }
1499 }
1500
1501 if (!found)
1502 return -EINVAL;
1503
1504 return 0;
1505}
1506
1507/**
1508 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1509 * to a host. Resource is uniquely identified by
1510 * type and subtype.
1511 * @handle: Pointer to TISCI handle.
1512 * @dev_id: TISCI device ID.
1513 * @subtype: Resource assignment subtype that is being requested
1514 * from the given device.
1515 * @s_host: Host processor ID to which the resources are allocated
1516 * @range_start: Start index of the resource range
1517 * @range_num: Number of resources in the range
1518 *
1519 * Return: 0 if all went fine, else return appropriate error.
1520 */
1521static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1522 u32 dev_id, u8 subtype, u8 s_host,
1523 u16 *range_start, u16 *range_num)
1524{
1525 struct ti_sci_msg_resp_get_resource_range *resp;
1526 struct ti_sci_msg_req_get_resource_range req;
1527 struct ti_sci_xfer *xfer;
1528 struct ti_sci_info *info;
1529 u16 type;
1530 int ret = 0;
1531
1532 if (IS_ERR(handle))
1533 return PTR_ERR(handle);
1534 if (!handle)
1535 return -EINVAL;
1536
1537 info = handle_to_ti_sci_info(handle);
1538
1539 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1540 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1541 (u32 *)&req, sizeof(req), sizeof(*resp));
1542 if (IS_ERR(xfer)) {
1543 ret = PTR_ERR(xfer);
1544 dev_err(dev, "Message alloc failed(%d)\n", ret);
1545 return ret;
1546 }
1547
1548 ret = ti_sci_get_resource_type(info, dev_id, &type);
1549 if (ret) {
1550 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1551 goto fail;
1552 }
1553
1554 req.secondary_host = s_host;
1555 req.type = type & MSG_RM_RESOURCE_TYPE_MASK;
1556 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1557
1558 ret = ti_sci_do_xfer(info, xfer);
1559 if (ret) {
1560 dev_err(dev, "Mbox send fail %d\n", ret);
1561 goto fail;
1562 }
1563
1564 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1565 if (!ti_sci_is_response_ack(resp)) {
1566 ret = -ENODEV;
1567 } else if (!resp->range_start && !resp->range_num) {
1568 ret = -ENODEV;
1569 } else {
1570 *range_start = resp->range_start;
1571 *range_num = resp->range_num;
1572 };
1573
1574fail:
1575 return ret;
1576}
1577
1578/**
1579 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1580 * that is same as ti sci interface host.
1581 * @handle: Pointer to TISCI handle.
1582 * @dev_id: TISCI device ID.
1583 * @subtype: Resource assignment subtype that is being requested
1584 * from the given device.
1585 * @range_start: Start index of the resource range
1586 * @range_num: Number of resources in the range
1587 *
1588 * Return: 0 if all went fine, else return appropriate error.
1589 */
1590static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1591 u32 dev_id, u8 subtype,
1592 u16 *range_start, u16 *range_num)
1593{
1594 return ti_sci_get_resource_range(handle, dev_id, subtype,
1595 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1596 range_start, range_num);
1597}
1598
1599/**
1600 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1601 * assigned to a specified host.
1602 * @handle: Pointer to TISCI handle.
1603 * @dev_id: TISCI device ID.
1604 * @subtype: Resource assignment subtype that is being requested
1605 * from the given device.
1606 * @s_host: Host processor ID to which the resources are allocated
1607 * @range_start: Start index of the resource range
1608 * @range_num: Number of resources in the range
1609 *
1610 * Return: 0 if all went fine, else return appropriate error.
1611 */
1612static
1613int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1614 u32 dev_id, u8 subtype, u8 s_host,
1615 u16 *range_start, u16 *range_num)
1616{
1617 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1618 range_start, range_num);
1619}
1620
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301621/**
Lokesh Vutla826eb742019-03-08 11:47:32 +05301622 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1623 * @handle: pointer to TI SCI handle
1624 * @msms_start: MSMC start as returned by tisci
1625 * @msmc_end: MSMC end as returned by tisci
1626 *
1627 * Return: 0 if all went well, else returns appropriate error value.
1628 */
1629static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1630 u64 *msmc_start, u64 *msmc_end)
1631{
1632 struct ti_sci_msg_resp_query_msmc *resp;
1633 struct ti_sci_msg_hdr req;
1634 struct ti_sci_info *info;
1635 struct ti_sci_xfer *xfer;
1636 int ret = 0;
1637
1638 if (IS_ERR(handle))
1639 return PTR_ERR(handle);
1640 if (!handle)
1641 return -EINVAL;
1642
1643 info = handle_to_ti_sci_info(handle);
1644
1645 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1646 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1647 (u32 *)&req, sizeof(req), sizeof(*resp));
1648 if (IS_ERR(xfer)) {
1649 ret = PTR_ERR(xfer);
1650 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1651 return ret;
1652 }
1653
1654 ret = ti_sci_do_xfer(info, xfer);
1655 if (ret) {
1656 dev_err(dev, "Mbox send fail %d\n", ret);
1657 return ret;
1658 }
1659
1660 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1661
1662 if (!ti_sci_is_response_ack(resp))
1663 return -ENODEV;
1664
1665 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1666 resp->msmc_start_low;
1667 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1668 resp->msmc_end_low;
1669
1670 return ret;
1671}
1672
1673/**
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301674 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1675 * @handle: Pointer to TI SCI handle
1676 * @proc_id: Processor ID this request is for
1677 *
1678 * Return: 0 if all went well, else returns appropriate error value.
1679 */
1680static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1681 u8 proc_id)
1682{
1683 struct ti_sci_msg_req_proc_request req;
1684 struct ti_sci_msg_hdr *resp;
1685 struct ti_sci_info *info;
1686 struct ti_sci_xfer *xfer;
1687 int ret = 0;
1688
1689 if (IS_ERR(handle))
1690 return PTR_ERR(handle);
1691 if (!handle)
1692 return -EINVAL;
1693
1694 info = handle_to_ti_sci_info(handle);
1695
1696 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1697 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1698 (u32 *)&req, sizeof(req), sizeof(*resp));
1699 if (IS_ERR(xfer)) {
1700 ret = PTR_ERR(xfer);
1701 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1702 return ret;
1703 }
1704 req.processor_id = proc_id;
1705
1706 ret = ti_sci_do_xfer(info, xfer);
1707 if (ret) {
1708 dev_err(info->dev, "Mbox send fail %d\n", ret);
1709 return ret;
1710 }
1711
1712 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1713
1714 if (!ti_sci_is_response_ack(resp))
1715 ret = -ENODEV;
1716
1717 return ret;
1718}
1719
1720/**
1721 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1722 * @handle: Pointer to TI SCI handle
1723 * @proc_id: Processor ID this request is for
1724 *
1725 * Return: 0 if all went well, else returns appropriate error value.
1726 */
1727static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1728 u8 proc_id)
1729{
1730 struct ti_sci_msg_req_proc_release req;
1731 struct ti_sci_msg_hdr *resp;
1732 struct ti_sci_info *info;
1733 struct ti_sci_xfer *xfer;
1734 int ret = 0;
1735
1736 if (IS_ERR(handle))
1737 return PTR_ERR(handle);
1738 if (!handle)
1739 return -EINVAL;
1740
1741 info = handle_to_ti_sci_info(handle);
1742
1743 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1744 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1745 (u32 *)&req, sizeof(req), sizeof(*resp));
1746 if (IS_ERR(xfer)) {
1747 ret = PTR_ERR(xfer);
1748 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1749 return ret;
1750 }
1751 req.processor_id = proc_id;
1752
1753 ret = ti_sci_do_xfer(info, xfer);
1754 if (ret) {
1755 dev_err(info->dev, "Mbox send fail %d\n", ret);
1756 return ret;
1757 }
1758
1759 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1760
1761 if (!ti_sci_is_response_ack(resp))
1762 ret = -ENODEV;
1763
1764 return ret;
1765}
1766
1767/**
1768 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1769 * control to a host in the processor's access
1770 * control list.
1771 * @handle: Pointer to TI SCI handle
1772 * @proc_id: Processor ID this request is for
1773 * @host_id: Host ID to get the control of the processor
1774 *
1775 * Return: 0 if all went well, else returns appropriate error value.
1776 */
1777static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1778 u8 proc_id, u8 host_id)
1779{
1780 struct ti_sci_msg_req_proc_handover req;
1781 struct ti_sci_msg_hdr *resp;
1782 struct ti_sci_info *info;
1783 struct ti_sci_xfer *xfer;
1784 int ret = 0;
1785
1786 if (IS_ERR(handle))
1787 return PTR_ERR(handle);
1788 if (!handle)
1789 return -EINVAL;
1790
1791 info = handle_to_ti_sci_info(handle);
1792
1793 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1794 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1795 (u32 *)&req, sizeof(req), sizeof(*resp));
1796 if (IS_ERR(xfer)) {
1797 ret = PTR_ERR(xfer);
1798 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1799 return ret;
1800 }
1801 req.processor_id = proc_id;
1802 req.host_id = host_id;
1803
1804 ret = ti_sci_do_xfer(info, xfer);
1805 if (ret) {
1806 dev_err(info->dev, "Mbox send fail %d\n", ret);
1807 return ret;
1808 }
1809
1810 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1811
1812 if (!ti_sci_is_response_ack(resp))
1813 ret = -ENODEV;
1814
1815 return ret;
1816}
1817
1818/**
1819 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1820 * configuration flags
1821 * @handle: Pointer to TI SCI handle
1822 * @proc_id: Processor ID this request is for
1823 * @config_flags_set: Configuration flags to be set
1824 * @config_flags_clear: Configuration flags to be cleared.
1825 *
1826 * Return: 0 if all went well, else returns appropriate error value.
1827 */
1828static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1829 u8 proc_id, u64 bootvector,
1830 u32 config_flags_set,
1831 u32 config_flags_clear)
1832{
1833 struct ti_sci_msg_req_set_proc_boot_config req;
1834 struct ti_sci_msg_hdr *resp;
1835 struct ti_sci_info *info;
1836 struct ti_sci_xfer *xfer;
1837 int ret = 0;
1838
1839 if (IS_ERR(handle))
1840 return PTR_ERR(handle);
1841 if (!handle)
1842 return -EINVAL;
1843
1844 info = handle_to_ti_sci_info(handle);
1845
1846 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1847 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1848 (u32 *)&req, sizeof(req), sizeof(*resp));
1849 if (IS_ERR(xfer)) {
1850 ret = PTR_ERR(xfer);
1851 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1852 return ret;
1853 }
1854 req.processor_id = proc_id;
1855 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1856 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1857 TISCI_ADDR_HIGH_SHIFT;
1858 req.config_flags_set = config_flags_set;
1859 req.config_flags_clear = config_flags_clear;
1860
1861 ret = ti_sci_do_xfer(info, xfer);
1862 if (ret) {
1863 dev_err(info->dev, "Mbox send fail %d\n", ret);
1864 return ret;
1865 }
1866
1867 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1868
1869 if (!ti_sci_is_response_ack(resp))
1870 ret = -ENODEV;
1871
1872 return ret;
1873}
1874
1875/**
1876 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1877 * control flags
1878 * @handle: Pointer to TI SCI handle
1879 * @proc_id: Processor ID this request is for
1880 * @control_flags_set: Control flags to be set
1881 * @control_flags_clear: Control flags to be cleared
1882 *
1883 * Return: 0 if all went well, else returns appropriate error value.
1884 */
1885static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1886 u8 proc_id, u32 control_flags_set,
1887 u32 control_flags_clear)
1888{
1889 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1890 struct ti_sci_msg_hdr *resp;
1891 struct ti_sci_info *info;
1892 struct ti_sci_xfer *xfer;
1893 int ret = 0;
1894
1895 if (IS_ERR(handle))
1896 return PTR_ERR(handle);
1897 if (!handle)
1898 return -EINVAL;
1899
1900 info = handle_to_ti_sci_info(handle);
1901
1902 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1903 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1904 (u32 *)&req, sizeof(req), sizeof(*resp));
1905 if (IS_ERR(xfer)) {
1906 ret = PTR_ERR(xfer);
1907 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1908 return ret;
1909 }
1910 req.processor_id = proc_id;
1911 req.control_flags_set = control_flags_set;
1912 req.control_flags_clear = control_flags_clear;
1913
1914 ret = ti_sci_do_xfer(info, xfer);
1915 if (ret) {
1916 dev_err(info->dev, "Mbox send fail %d\n", ret);
1917 return ret;
1918 }
1919
1920 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1921
1922 if (!ti_sci_is_response_ack(resp))
1923 ret = -ENODEV;
1924
1925 return ret;
1926}
1927
1928/**
1929 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1930 * image and then set the processor configuration flags.
1931 * @handle: Pointer to TI SCI handle
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001932 * @image_addr: Memory address at which payload image and certificate is
1933 * located in memory, this is updated if the image data is
1934 * moved during authentication.
1935 * @image_size: This is updated with the final size of the image after
1936 * authentication.
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301937 *
1938 * Return: 0 if all went well, else returns appropriate error value.
1939 */
1940static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001941 u64 *image_addr, u32 *image_size)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301942{
1943 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001944 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301945 struct ti_sci_info *info;
1946 struct ti_sci_xfer *xfer;
1947 int ret = 0;
1948
1949 if (IS_ERR(handle))
1950 return PTR_ERR(handle);
1951 if (!handle)
1952 return -EINVAL;
1953
1954 info = handle_to_ti_sci_info(handle);
1955
1956 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1957 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1958 (u32 *)&req, sizeof(req), sizeof(*resp));
1959 if (IS_ERR(xfer)) {
1960 ret = PTR_ERR(xfer);
1961 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1962 return ret;
1963 }
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001964 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1965 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301966 TISCI_ADDR_HIGH_SHIFT;
1967
1968 ret = ti_sci_do_xfer(info, xfer);
1969 if (ret) {
1970 dev_err(info->dev, "Mbox send fail %d\n", ret);
1971 return ret;
1972 }
1973
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001974 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301975
1976 if (!ti_sci_is_response_ack(resp))
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001977 return -ENODEV;
1978
1979 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
1980 (((u64)resp->image_addr_high <<
1981 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1982 *image_size = resp->image_size;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301983
1984 return ret;
1985}
1986
/**
 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Output: 64-bit boot vector, reassembled from the low/high
 *		32-bit halves of the response
 * @cfg_flags:	Output: processor configuration flags from the response
 * @ctrl_flags:	Output: processor control flags from the response
 * @sts_flags:	Output: processor status flags from the response
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
					   u8 proc_id, u64 *bv, u32 *cfg_flags,
					   u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_proc_boot_status *resp;
	struct ti_sci_msg_req_get_proc_boot_status req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* req is registered as the payload buffer; fields are filled below */
	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is received in place, in the transmit buffer */
	resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
							xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;
	/* Reassemble the 64-bit boot vector from its two 32-bit halves */
	*bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
			(((u64)resp->bootvector_high <<
			  TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
	*cfg_flags = resp->config_flags;
	*ctrl_flags = resp->control_flags;
	*sts_flags = resp->status_flags;

	return ret;
}
2041
/**
 * ti_sci_cmd_ring_config() - configure RA ring
 * @handle: pointer to TI SCI handle
 * @valid_params: Bitfield defining validity of ring configuration parameters.
 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
 * @index: Ring index.
 * @addr_lo: The ring base address lo 32 bits
 * @addr_hi: The ring base address hi 32 bits
 * @count: Number of ring elements.
 * @mode: The mode of the ring
 * @size: The ring element size.
 * @order_id: Specifies the ring's bus order ID.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_cfg_req for more info.
 */
static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
				  u32 valid_params, u16 nav_id, u16 index,
				  u32 addr_lo, u32 addr_hi, u32 count,
				  u8 mode, u8 size, u8 order_id)
{
	struct ti_sci_msg_rm_ring_cfg_resp *resp;
	struct ti_sci_msg_rm_ring_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* req becomes the message payload; fill the fields before sending */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
		return ret;
	}
	req.valid_params = valid_params;
	req.nav_id = nav_id;
	req.index = index;
	req.addr_lo = addr_lo;
	req.addr_hi = addr_hi;
	req.count = count;
	req.mode = mode;
	req.size = size;
	req.order_id = order_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
		goto fail;
	}

	/* Response is received in place, in the transmit buffer */
	resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
	return ret;
}
2109
2110/**
2111 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2112 * @handle: pointer to TI SCI handle
2113 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2114 * @index: Ring index.
2115 * @addr_lo: returns ring's base address lo 32 bits
2116 * @addr_hi: returns ring's base address hi 32 bits
2117 * @count: returns number of ring elements.
2118 * @mode: returns mode of the ring
2119 * @size: returns ring element size.
2120 * @order_id: returns ring's bus order ID.
2121 *
2122 * Return: 0 if all went well, else returns appropriate error value.
2123 *
2124 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2125 */
2126static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2127 u32 nav_id, u32 index, u8 *mode,
2128 u32 *addr_lo, u32 *addr_hi,
2129 u32 *count, u8 *size, u8 *order_id)
2130{
2131 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2132 struct ti_sci_msg_rm_ring_get_cfg_req req;
2133 struct ti_sci_xfer *xfer;
2134 struct ti_sci_info *info;
2135 int ret = 0;
2136
2137 if (IS_ERR(handle))
2138 return PTR_ERR(handle);
2139 if (!handle)
2140 return -EINVAL;
2141
2142 info = handle_to_ti_sci_info(handle);
2143
2144 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2145 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2146 (u32 *)&req, sizeof(req), sizeof(*resp));
2147 if (IS_ERR(xfer)) {
2148 ret = PTR_ERR(xfer);
2149 dev_err(info->dev,
2150 "RM_RA:Message get config failed(%d)\n", ret);
2151 return ret;
2152 }
2153 req.nav_id = nav_id;
2154 req.index = index;
2155
2156 ret = ti_sci_do_xfer(info, xfer);
2157 if (ret) {
2158 dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2159 goto fail;
2160 }
2161
2162 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
2163
2164 if (!ti_sci_is_response_ack(resp)) {
2165 ret = -ENODEV;
2166 } else {
2167 if (mode)
2168 *mode = resp->mode;
2169 if (addr_lo)
2170 *addr_lo = resp->addr_lo;
2171 if (addr_hi)
2172 *addr_hi = resp->addr_hi;
2173 if (count)
2174 *count = resp->count;
2175 if (size)
2176 *size = resp->size;
2177 if (order_id)
2178 *order_id = resp->order_id;
2179 };
2180
2181fail:
2182 dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2183 return ret;
2184}
2185
2186static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2187 u32 nav_id, u32 src_thread, u32 dst_thread)
2188{
2189 struct ti_sci_msg_hdr *resp;
2190 struct ti_sci_msg_psil_pair req;
2191 struct ti_sci_xfer *xfer;
2192 struct ti_sci_info *info;
2193 int ret = 0;
2194
2195 if (IS_ERR(handle))
2196 return PTR_ERR(handle);
2197 if (!handle)
2198 return -EINVAL;
2199
2200 info = handle_to_ti_sci_info(handle);
2201
2202 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2203 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2204 (u32 *)&req, sizeof(req), sizeof(*resp));
2205 if (IS_ERR(xfer)) {
2206 ret = PTR_ERR(xfer);
2207 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2208 return ret;
2209 }
2210 req.nav_id = nav_id;
2211 req.src_thread = src_thread;
2212 req.dst_thread = dst_thread;
2213
2214 ret = ti_sci_do_xfer(info, xfer);
2215 if (ret) {
2216 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2217 goto fail;
2218 }
2219
2220 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2221 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2222
2223fail:
2224 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2225 nav_id, src_thread, dst_thread, ret);
2226 return ret;
2227}
2228
2229static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2230 u32 nav_id, u32 src_thread, u32 dst_thread)
2231{
2232 struct ti_sci_msg_hdr *resp;
2233 struct ti_sci_msg_psil_unpair req;
2234 struct ti_sci_xfer *xfer;
2235 struct ti_sci_info *info;
2236 int ret = 0;
2237
2238 if (IS_ERR(handle))
2239 return PTR_ERR(handle);
2240 if (!handle)
2241 return -EINVAL;
2242
2243 info = handle_to_ti_sci_info(handle);
2244
2245 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2246 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2247 (u32 *)&req, sizeof(req), sizeof(*resp));
2248 if (IS_ERR(xfer)) {
2249 ret = PTR_ERR(xfer);
2250 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2251 return ret;
2252 }
2253 req.nav_id = nav_id;
2254 req.src_thread = src_thread;
2255 req.dst_thread = dst_thread;
2256
2257 ret = ti_sci_do_xfer(info, xfer);
2258 if (ret) {
2259 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2260 goto fail;
2261 }
2262
2263 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2264 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2265
2266fail:
2267 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2268 src_thread, dst_thread, ret);
2269 return ret;
2270}
2271
2272static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2273 const struct ti_sci_handle *handle,
2274 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2275{
2276 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2277 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2278 struct ti_sci_xfer *xfer;
2279 struct ti_sci_info *info;
2280 int ret = 0;
2281
2282 if (IS_ERR(handle))
2283 return PTR_ERR(handle);
2284 if (!handle)
2285 return -EINVAL;
2286
2287 info = handle_to_ti_sci_info(handle);
2288
2289 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2290 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2291 (u32 *)&req, sizeof(req), sizeof(*resp));
2292 if (IS_ERR(xfer)) {
2293 ret = PTR_ERR(xfer);
2294 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2295 return ret;
2296 }
2297 req.valid_params = params->valid_params;
2298 req.nav_id = params->nav_id;
2299 req.index = params->index;
2300 req.tx_pause_on_err = params->tx_pause_on_err;
2301 req.tx_filt_einfo = params->tx_filt_einfo;
2302 req.tx_filt_pswords = params->tx_filt_pswords;
2303 req.tx_atype = params->tx_atype;
2304 req.tx_chan_type = params->tx_chan_type;
2305 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2306 req.tx_fetch_size = params->tx_fetch_size;
2307 req.tx_credit_count = params->tx_credit_count;
2308 req.txcq_qnum = params->txcq_qnum;
2309 req.tx_priority = params->tx_priority;
2310 req.tx_qos = params->tx_qos;
2311 req.tx_orderid = params->tx_orderid;
2312 req.fdepth = params->fdepth;
2313 req.tx_sched_priority = params->tx_sched_priority;
2314
2315 ret = ti_sci_do_xfer(info, xfer);
2316 if (ret) {
2317 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2318 goto fail;
2319 }
2320
2321 resp =
2322 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2323 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2324
2325fail:
2326 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2327 return ret;
2328}
2329
/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
 * @handle:	pointer to TI SCI handle
 * @params:	RX channel configuration parameters, copied field by field
 *		into the TISCI request
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req for more info.
 */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
			const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}

	req.valid_params = params->valid_params;
	req.nav_id = params->nav_id;
	req.index = params->index;
	req.rx_fetch_size = params->rx_fetch_size;
	req.rxcq_qnum = params->rxcq_qnum;
	req.rx_priority = params->rx_priority;
	req.rx_qos = params->rx_qos;
	req.rx_orderid = params->rx_orderid;
	req.rx_sched_priority = params->rx_sched_priority;
	req.flowid_start = params->flowid_start;
	req.flowid_cnt = params->flowid_cnt;
	req.rx_pause_on_err = params->rx_pause_on_err;
	req.rx_atype = params->rx_atype;
	req.rx_chan_type = params->rx_chan_type;
	req.rx_ignore_short = params->rx_ignore_short;
	req.rx_ignore_long = params->rx_ignore_long;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	/* Response is received in place, in the transmit buffer */
	resp =
	      (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}
2387
2388static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2389 const struct ti_sci_handle *handle,
2390 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2391{
2392 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2393 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2394 struct ti_sci_xfer *xfer;
2395 struct ti_sci_info *info;
2396 int ret = 0;
2397
2398 if (IS_ERR(handle))
2399 return PTR_ERR(handle);
2400 if (!handle)
2401 return -EINVAL;
2402
2403 info = handle_to_ti_sci_info(handle);
2404
2405 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2406 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2407 (u32 *)&req, sizeof(req), sizeof(*resp));
2408 if (IS_ERR(xfer)) {
2409 ret = PTR_ERR(xfer);
2410 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2411 return ret;
2412 }
2413
2414 req.valid_params = params->valid_params;
2415 req.nav_id = params->nav_id;
2416 req.flow_index = params->flow_index;
2417 req.rx_einfo_present = params->rx_einfo_present;
2418 req.rx_psinfo_present = params->rx_psinfo_present;
2419 req.rx_error_handling = params->rx_error_handling;
2420 req.rx_desc_type = params->rx_desc_type;
2421 req.rx_sop_offset = params->rx_sop_offset;
2422 req.rx_dest_qnum = params->rx_dest_qnum;
2423 req.rx_src_tag_hi = params->rx_src_tag_hi;
2424 req.rx_src_tag_lo = params->rx_src_tag_lo;
2425 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2426 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2427 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2428 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2429 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2430 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2431 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2432 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2433 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2434 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2435 req.rx_ps_location = params->rx_ps_location;
2436
2437 ret = ti_sci_do_xfer(info, xfer);
2438 if (ret) {
2439 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2440 goto fail;
2441 }
2442
2443 resp =
2444 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2445 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2446
2447fail:
2448 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2449 return ret;
2450}
2451
/**
 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
 * @handle:	pointer to TI SCI handle
 * @region:	region configuration parameters to program; all fields are
 *		copied into the TISCI request
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
				     const struct ti_sci_msg_fwl_region *region)
{
	struct ti_sci_msg_fwl_set_firewall_region_req req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = region->fwl_id;
	req.region = region->region;
	req.n_permission_regs = region->n_permission_regs;
	req.control = region->control;
	req.permissions[0] = region->permissions[0];
	req.permissions[1] = region->permissions[1];
	req.permissions[2] = region->permissions[2];
	req.start_address = region->start_address;
	req.end_address = region->end_address;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Only the generic ACK/NACK header matters in the response */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return 0;
}
2507
/**
 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
 * @handle:	pointer to TI SCI handle
 * @region:	in/out region parameters; fwl_id, region and
 *		n_permission_regs select the region to query, and all fields
 *		are overwritten with the firmware's response on success
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
				     struct ti_sci_msg_fwl_region *region)
{
	struct ti_sci_msg_fwl_get_firewall_region_req req;
	struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = region->fwl_id;
	req.region = region->region;
	req.n_permission_regs = region->n_permission_regs;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is received in place, in the transmit buffer */
	resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	/* Copy the firmware's view of the region back to the caller */
	region->fwl_id = resp->fwl_id;
	region->region = resp->region;
	region->n_permission_regs = resp->n_permission_regs;
	region->control = resp->control;
	region->permissions[0] = resp->permissions[0];
	region->permissions[1] = resp->permissions[1];
	region->permissions[2] = resp->permissions[2];
	region->start_address = resp->start_address;
	region->end_address = resp->end_address;

	return 0;
}
2567
/**
 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
 * @handle:	pointer to TI SCI handle
 * @owner:	in/out owner parameters; fwl_id, region and owner_index
 *		select the new owner, and all fields are overwritten with
 *		the firmware's response on success
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
				       struct ti_sci_msg_fwl_owner *owner)
{
	struct ti_sci_msg_fwl_change_owner_info_req req;
	struct ti_sci_msg_fwl_change_owner_info_resp *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = owner->fwl_id;
	req.region = owner->region;
	req.owner_index = owner->owner_index;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is received in place, in the transmit buffer */
	resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	/* Copy the firmware's view of the new owner back to the caller */
	owner->fwl_id = resp->fwl_id;
	owner->region = resp->region;
	owner->owner_index = resp->owner_index;
	owner->owner_privid = resp->owner_privid;
	owner->owner_permission_bits = resp->owner_permission_bits;

	return ret;
}
2623
/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI info structure whose handle.ops is populated
 *
 * Wires every ops table embedded in the handle to the command
 * implementations above. Called once from probe.
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_board_ops *bops = &ops->board_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
	struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;

	/* Board configuration */
	bops->board_config = ti_sci_cmd_set_board_config;
	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
	bops->board_config_security = ti_sci_cmd_set_board_config_security;
	bops->board_config_pm = ti_sci_cmd_set_board_config_pm;

	/* Device power management */
	dops->get_device = ti_sci_cmd_get_device;
	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	/* Clock management */
	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	/* Core/system operations */
	core_ops->reboot_device = ti_sci_cmd_core_reboot;
	core_ops->query_msmc = ti_sci_cmd_query_msmc;

	/* Resource management ranges */
	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	/* Remote processor control */
	pops->proc_request = ti_sci_cmd_proc_request;
	pops->proc_release = ti_sci_cmd_proc_release;
	pops->proc_handover = ti_sci_cmd_proc_handover;
	pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;

	/* Navigator: ring accelerator, PSI-L and UDMAP */
	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;

	/* Firewall management */
	fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
	fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
	fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
}
2705
2706/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302707 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2708 * @dev: Pointer to the SYSFW device
2709 *
2710 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2711 * are encountered.
2712 */
2713const
2714struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2715{
2716 if (!sci_dev)
2717 return ERR_PTR(-EINVAL);
2718
2719 struct ti_sci_info *info = dev_get_priv(sci_dev);
2720
2721 if (!info)
2722 return ERR_PTR(-EINVAL);
2723
2724 struct ti_sci_handle *handle = &info->handle;
2725
2726 if (!handle)
2727 return ERR_PTR(-EINVAL);
2728
2729 return handle;
2730}
2731
2732/**
2733 * ti_sci_get_handle() - Get the TI SCI handle for a device
2734 * @dev: Pointer to device for which we want SCI handle
2735 *
2736 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2737 * are encountered.
2738 */
2739const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2740{
2741 if (!dev)
2742 return ERR_PTR(-EINVAL);
2743
2744 struct udevice *sci_dev = dev_get_parent(dev);
2745
2746 return ti_sci_get_handle_from_sysfw(sci_dev);
2747}
2748
2749/**
2750 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2751 * @dev: device node
2752 * @propname: property name containing phandle on TISCI node
2753 *
2754 * Return: pointer to handle if successful, else appropriate error value.
2755 */
2756const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2757 const char *property)
2758{
2759 struct ti_sci_info *entry, *info = NULL;
2760 u32 phandle, err;
2761 ofnode node;
2762
2763 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2764 if (err)
2765 return ERR_PTR(err);
2766
2767 node = ofnode_get_by_phandle(phandle);
2768 if (!ofnode_valid(node))
2769 return ERR_PTR(-EINVAL);
2770
2771 list_for_each_entry(entry, &ti_sci_list, list)
2772 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2773 info = entry;
2774 break;
2775 }
2776
2777 if (!info)
2778 return ERR_PTR(-ENODEV);
2779
2780 return &info->handle;
2781}
2782
/**
 * ti_sci_of_to_info() - generate private data from device tree
 * @dev:	corresponding system controller interface device
 * @info:	pointer to driver specific private data
 *
 * Acquires the mandatory "tx"/"rx" mailbox channels and the optional
 * "notify" channel, then reads the host identity properties.
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
{
	int ret;

	ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Notify channel is optional. Enable only if populated */
	ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
	if (ret) {
		dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
			__func__, ret);
	}

	/* Fall back to the SoC descriptor's default host ID if DT omits it */
	info->host_id = dev_read_u32_default(dev, "ti,host-id",
					     info->desc->default_host_id);

	info->is_secure = dev_read_bool(dev, "ti,secure-host");

	return 0;
}
2822
/**
 * ti_sci_probe() - Basic probe
 * @dev:	corresponding system controller interface device
 *
 * Parses the DT configuration, registers the instance on ti_sci_list,
 * installs the ops tables and queries the firmware revision as a first
 * sanity transfer.
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int ti_sci_probe(struct udevice *dev)
{
	struct ti_sci_info *info;
	int ret;

	debug("%s(dev=%p)\n", __func__, dev);

	info = dev_get_priv(dev);
	/* SoC descriptor selected via the matched compatible's driver data */
	info->desc = (void *)dev_get_driver_data(dev);

	ret = ti_sci_of_to_info(dev, info);
	if (ret) {
		dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
		return ret;
	}

	info->dev = dev;
	/* Arbitrary non-zero starting sequence number for messages */
	info->seq = 0xA;

	/* Make this instance discoverable via ti_sci_get_by_phandle() */
	list_add_tail(&info->list, &ti_sci_list);
	ti_sci_setup_ops(info);

	/* First real transfer doubles as a firmware sanity check */
	ret = ti_sci_cmd_get_revision(&info->handle);

	return ret;
}
2855
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302856/*
2857 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2858 * @res: Pointer to the TISCI resource
2859 *
2860 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2861 */
2862u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2863{
2864 u16 set, free_bit;
2865
2866 for (set = 0; set < res->sets; set++) {
2867 free_bit = find_first_zero_bit(res->desc[set].res_map,
2868 res->desc[set].num);
2869 if (free_bit != res->desc[set].num) {
2870 set_bit(free_bit, res->desc[set].res_map);
2871 return res->desc[set].start + free_bit;
2872 }
2873 }
2874
2875 return TI_SCI_RESOURCE_NULL;
2876}
2877
2878/**
2879 * ti_sci_release_resource() - Release a resource from TISCI resource.
2880 * @res: Pointer to the TISCI resource
2881 */
2882void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2883{
2884 u16 set;
2885
2886 for (set = 0; set < res->sets; set++) {
2887 if (res->desc[set].start <= id &&
2888 (res->desc[set].num + res->desc[set].start) > id)
2889 clear_bit(id - res->desc[set].start,
2890 res->desc[set].res_map);
2891 }
2892}
2893
2894/**
2895 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2896 * @handle: TISCI handle
2897 * @dev: Device pointer to which the resource is assigned
2898 * @of_prop: property name by which the resource are represented
2899 *
2900 * Note: This function expects of_prop to be in the form of tuples
2901 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2902 * for each of_prop. Client driver can directly call
2903 * ti_sci_(get_free, release)_resource apis for handling the resource.
2904 *
2905 * Return: Pointer to ti_sci_resource if all went well else appropriate
2906 * error pointer.
2907 */
2908struct ti_sci_resource *
2909devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2910 struct udevice *dev, u32 dev_id, char *of_prop)
2911{
2912 u32 resource_subtype;
2913 u16 resource_type;
2914 struct ti_sci_resource *res;
2915 int sets, i, ret;
2916 u32 *temp;
2917
2918 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2919 if (!res)
2920 return ERR_PTR(-ENOMEM);
2921
2922 sets = dev_read_size(dev, of_prop);
2923 if (sets < 0) {
2924 dev_err(dev, "%s resource type ids not available\n", of_prop);
2925 return ERR_PTR(sets);
2926 }
2927 temp = malloc(sets);
2928 sets /= sizeof(u32);
2929 res->sets = sets;
2930
2931 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2932 GFP_KERNEL);
2933 if (!res->desc)
2934 return ERR_PTR(-ENOMEM);
2935
2936 ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
2937 &resource_type);
2938 if (ret) {
2939 dev_err(dev, "No valid resource type for %u\n", dev_id);
2940 return ERR_PTR(-EINVAL);
2941 }
2942
2943 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2944 if (ret)
2945 return ERR_PTR(-EINVAL);
2946
2947 for (i = 0; i < res->sets; i++) {
2948 resource_subtype = temp[i];
2949 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2950 resource_subtype,
2951 &res->desc[i].start,
2952 &res->desc[i].num);
2953 if (ret) {
2954 dev_err(dev, "type %d subtype %d not allocated for host %d\n",
2955 resource_type, resource_subtype,
2956 handle_to_ti_sci_info(handle)->host_id);
2957 return ERR_PTR(ret);
2958 }
2959
2960 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
2961 resource_type, resource_subtype, res->desc[i].start,
2962 res->desc[i].num);
2963
2964 res->desc[i].res_map =
2965 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2966 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2967 if (!res->desc[i].res_map)
2968 return ERR_PTR(-ENOMEM);
2969 }
2970
2971 return res;
2972}
2973
/* Description for K2G: SoC integration parameters used by the transport */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	/* K2G uses dev_id directly as the RM type, so no mapping table */
	.rm_type_map = NULL,
};
2984
/*
 * AM654 work-around table translating TISCI device IDs to the legacy RM
 * "type" values expected by the resource-range APIs (see
 * struct ti_sci_rm_type_map above). Terminated by the all-zero entry.
 */
static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};
2994
/* Description for AM654: SoC integration parameters used by the transport */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN (original comment referenced K2G) */
	.max_msgs = 20,
	.max_msg_size = 60,
	.rm_type_map = ti_sci_am654_rm_type_map,
};
3005
/* DT compatibles; .data carries the matching SoC descriptor */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
3017
/* U-Boot driver-model registration for the TISCI firmware driver */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* Per-device private data allocated by DM before probe */
	.priv_auto_alloc_size = sizeof(struct ti_sci_info),
};