blob: d47d22fff3e4661e36bbbd876f1cd10815257411 [file] [log] [blame]
Lokesh Vutla32cd2512018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <mailbox.h>
14#include <dm/device.h>
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053015#include <linux/compat.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053016#include <linux/err.h>
17#include <linux/soc/ti/k3-sec-proxy.h>
18#include <linux/soc/ti/ti_sci_protocol.h>
19
20#include "ti_sci.h"
21
22/* List of all TI SCI devices active in system */
23static LIST_HEAD(ti_sci_list);
24
25/**
26 * struct ti_sci_xfer - Structure representing a message flow
27 * @tx_message: Transmit message
28 * @rx_len: Receive message length
29 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;	/* TX buffer pointer and length */
	u8 rx_len;				/* expected RX length in bytes */
};
34
35/**
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053036 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
37 * management representation of dev_ids.
38 * @dev_id: TISCI device ID
39 * @type: Corresponding id as identified by TISCI RM.
40 *
41 * Note: This is used only as a work around for using RM range apis
42 * for AM654 SoC. For future SoCs dev_id will be used as type
43 * for RM range APIs. In order to maintain ABI backward compatibility
44 * type is not being changed for AM654 SoC.
45 */
struct ti_sci_rm_type_map {
	u32 dev_id;	/* TISCI device ID */
	u16 type;	/* corresponding resource type as identified by TISCI RM */
};
50
51/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +053052 * struct ti_sci_desc - Description of SoC integration
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053053 * @default_host_id: Host identifier representing the compute entity
54 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
55 * @max_msgs: Maximum number of messages that can be pending
56 * simultaneously in the system
57 * @max_msg_size: Maximum size of data per message that can be handled.
58 * @rm_type_map: RM resource type mapping structure.
Lokesh Vutla32cd2512018-08-27 15:57:32 +053059 */
struct ti_sci_desc {
	u8 default_host_id;	/* host identifier for the compute entity */
	int max_rx_timeout_ms;	/* timeout passed to mbox_recv(), in ms */
	int max_msgs;		/* max messages pending simultaneously */
	int max_msg_size;	/* max bytes per message, both directions */
	struct ti_sci_rm_type_map *rm_type_map;	/* dev_id -> RM type table (AM654 workaround) */
};
67
68/**
69 * struct ti_sci_info - Structure representing a TI SCI instance
70 * @dev: Device pointer
71 * @desc: SoC description for this instance
72 * @handle: Instance of TI SCI handle to send to clients.
73 * @chan_tx: Transmit mailbox channel
74 * @chan_rx: Receive mailbox channel
75 * @xfer: xfer info
76 * @list: list head
77 * @is_secure: Determines if the communication is through secure threads.
78 * @host_id: Host identifier representing the compute entity
79 * @seq: Seq id used for verification for tx and rx message.
80 */
struct ti_sci_info {
	struct udevice *dev;		/* device pointer */
	const struct ti_sci_desc *desc;	/* SoC integration description */
	struct ti_sci_handle handle;	/* handle handed out to clients */
	struct mbox_chan chan_tx;	/* transmit mailbox channel */
	struct mbox_chan chan_rx;	/* receive mailbox channel */
	/* notification channel; not exercised by the code shown here —
	 * presumably for asynchronous firmware notifications (verify) */
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;	/* single reusable transfer descriptor */
	struct list_head list;		/* node in global ti_sci_list */
	bool is_secure;			/* communication uses secure threads */
	u8 host_id;			/* host identifier placed in msg headers */
	u8 seq;				/* sequence id, toggled per transfer */
};
94
/* Map a client-visible ti_sci_handle back to its containing ti_sci_info */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
96
97/**
98 * ti_sci_setup_one_xfer() - Setup one message type
99 * @info: Pointer to SCI entity information
100 * @msg_type: Message type
101 * @msg_flags: Flag to set for the message
102 * @buf: Buffer to be send to mailbox channel
103 * @tx_message_size: transmit message size
104 * @rx_message_size: receive message size
105 *
106 * Helper function which is used by various command functions that are
107 * exposed to clients of this driver for allocating a message traffic event.
108 *
109 * Return: Corresponding ti_sci_xfer pointer if all went fine,
110 * else appropriate error pointer.
111 */
112static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
113 u16 msg_type, u32 msg_flags,
114 u32 *buf,
115 size_t tx_message_size,
116 size_t rx_message_size)
117{
118 struct ti_sci_xfer *xfer = &info->xfer;
119 struct ti_sci_msg_hdr *hdr;
120
121 /* Ensure we have sane transfer sizes */
122 if (rx_message_size > info->desc->max_msg_size ||
123 tx_message_size > info->desc->max_msg_size ||
124 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
125 return ERR_PTR(-ERANGE);
126
127 info->seq = ~info->seq;
128 xfer->tx_message.buf = buf;
129 xfer->tx_message.len = tx_message_size;
130 xfer->rx_len = (u8)rx_message_size;
131
132 hdr = (struct ti_sci_msg_hdr *)buf;
133 hdr->seq = info->seq;
134 hdr->type = msg_type;
135 hdr->host = info->host_id;
136 hdr->flags = msg_flags;
137
138 return xfer;
139}
140
141/**
142 * ti_sci_get_response() - Receive response from mailbox channel
143 * @info: Pointer to SCI entity information
144 * @xfer: Transfer to initiate and wait for response
145 * @chan: Channel to receive the response
146 *
147 * Return: -ETIMEDOUT in case of no response, if transmit error,
148 * return corresponding error, else if all goes well,
149 * return 0.
150 */
static inline int ti_sci_get_response(struct ti_sci_info *info,
				      struct ti_sci_xfer *xfer,
				      struct mbox_chan *chan)
{
	/* RX reuses the TX message descriptor; mailbox driver updates it */
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		/* Skip the secure header to expose the TISCI header proper */
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		/*
		 * NOTE(review): ret is 0 at this point, so a sequence
		 * mismatch is reported via dev_dbg but still returned as
		 * success to the caller — confirm this is intentional.
		 */
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		return ret;
	}

	/* A response larger than the SoC limit cannot be trusted */
	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	/* Short responses are logged but deliberately not treated as fatal */
	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}
197
198/**
199 * ti_sci_do_xfer() - Do one transfer
200 * @info: Pointer to SCI entity information
201 * @xfer: Transfer to initiate and wait for response
202 *
203 * Return: 0 if all went fine, else return appropriate error.
204 */
205static inline int ti_sci_do_xfer(struct ti_sci_info *info,
206 struct ti_sci_xfer *xfer)
207{
208 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
209 u8 secure_buf[info->desc->max_msg_size];
210 struct ti_sci_secure_msg_hdr secure_hdr;
211 int ret;
212
213 if (info->is_secure) {
214 /* ToDo: get checksum of the entire message */
215 secure_hdr.checksum = 0;
216 secure_hdr.reserved = 0;
217 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
218 xfer->tx_message.len);
219
220 xfer->tx_message.buf = (u32 *)secure_buf;
221 xfer->tx_message.len += sizeof(secure_hdr);
222 xfer->rx_len += sizeof(secure_hdr);
223 }
224
225 /* Send the message */
226 ret = mbox_send(&info->chan_tx, msg);
227 if (ret) {
228 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
229 __func__, ret);
230 return ret;
231 }
232
233 return ti_sci_get_response(info, xfer, &info->chan_rx);
234}
235
236/**
237 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
238 * @handle: pointer to TI SCI handle
239 *
240 * Updates the SCI information in the internal data structure.
241 *
242 * Return: 0 if all went fine, else return appropriate error.
243 */
244static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
245{
246 struct ti_sci_msg_resp_version *rev_info;
247 struct ti_sci_version_info *ver;
248 struct ti_sci_msg_hdr hdr;
249 struct ti_sci_info *info;
250 struct ti_sci_xfer *xfer;
251 int ret;
252
253 if (IS_ERR(handle))
254 return PTR_ERR(handle);
255 if (!handle)
256 return -EINVAL;
257
258 info = handle_to_ti_sci_info(handle);
259
260 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION, 0x0,
261 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
262 sizeof(*rev_info));
263 if (IS_ERR(xfer)) {
264 ret = PTR_ERR(xfer);
265 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
266 return ret;
267 }
268
269 ret = ti_sci_do_xfer(info, xfer);
270 if (ret) {
271 dev_err(info->dev, "Mbox communication fail %d\n", ret);
272 return ret;
273 }
274
275 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
276
277 ver = &handle->version;
278 ver->abi_major = rev_info->abi_major;
279 ver->abi_minor = rev_info->abi_minor;
280 ver->firmware_revision = rev_info->firmware_revision;
281 strncpy(ver->firmware_description, rev_info->firmware_description,
282 sizeof(ver->firmware_description));
283
284 return 0;
285}
286
287/**
288 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
289 * @r: pointer to response buffer
290 *
291 * Return: true if the response was an ACK, else returns false.
292 */
293static inline bool ti_sci_is_response_ack(void *r)
294{
295 struct ti_sci_msg_hdr *hdr = r;
296
297 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
298}
299
300/**
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +0530301 * cmd_set_board_config_using_msg() - Common command to send board configuration
302 * message
303 * @handle: pointer to TI SCI handle
304 * @msg_type: One of the TISCI message types to set board configuration
305 * @addr: Address where the board config structure is located
306 * @size: Size of the board config structure
307 *
308 * Return: 0 if all went well, else returns appropriate error value.
309 */
310static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
311 u16 msg_type, u64 addr, u32 size)
312{
313 struct ti_sci_msg_board_config req;
314 struct ti_sci_msg_hdr *resp;
315 struct ti_sci_info *info;
316 struct ti_sci_xfer *xfer;
317 int ret = 0;
318
319 if (IS_ERR(handle))
320 return PTR_ERR(handle);
321 if (!handle)
322 return -EINVAL;
323
324 info = handle_to_ti_sci_info(handle);
325
326 xfer = ti_sci_setup_one_xfer(info, msg_type,
327 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
328 (u32 *)&req, sizeof(req), sizeof(*resp));
329 if (IS_ERR(xfer)) {
330 ret = PTR_ERR(xfer);
331 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
332 return ret;
333 }
334 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
335 req.boardcfgp_low = addr & 0xffffffff;
336 req.boardcfg_size = size;
337
338 ret = ti_sci_do_xfer(info, xfer);
339 if (ret) {
340 dev_err(info->dev, "Mbox send fail %d\n", ret);
341 return ret;
342 }
343
344 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
345
346 if (!ti_sci_is_response_ack(resp))
347 return -ENODEV;
348
349 return ret;
350}
351
352/**
353 * ti_sci_cmd_set_board_config() - Command to send board configuration message
354 * @handle: pointer to TI SCI handle
355 * @addr: Address where the board config structure is located
356 * @size: Size of the board config structure
357 *
358 * Return: 0 if all went well, else returns appropriate error value.
359 */
360static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
361 u64 addr, u32 size)
362{
363 return cmd_set_board_config_using_msg(handle,
364 TI_SCI_MSG_BOARD_CONFIG,
365 addr, size);
366}
367
368/**
369 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
370 * management configuration
371 * @handle: pointer to TI SCI handle
372 * @addr: Address where the board RM config structure is located
373 * @size: Size of the RM config structure
374 *
375 * Return: 0 if all went well, else returns appropriate error value.
376 */
377static
378int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
379 u64 addr, u32 size)
380{
381 return cmd_set_board_config_using_msg(handle,
382 TI_SCI_MSG_BOARD_CONFIG_RM,
383 addr, size);
384}
385
386/**
387 * ti_sci_cmd_set_board_config_security() - Command to send board security
388 * configuration message
389 * @handle: pointer to TI SCI handle
390 * @addr: Address where the board security config structure is located
391 * @size: Size of the security config structure
392 *
393 * Return: 0 if all went well, else returns appropriate error value.
394 */
395static
396int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
397 u64 addr, u32 size)
398{
399 return cmd_set_board_config_using_msg(handle,
400 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
401 addr, size);
402}
403
404/**
405 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
406 * configuration message
407 * @handle: pointer to TI SCI handle
408 * @addr: Address where the board PM config structure is located
409 * @size: Size of the PM config structure
410 *
411 * Return: 0 if all went well, else returns appropriate error value.
412 */
413static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
414 u64 addr, u32 size)
415{
416 return cmd_set_board_config_using_msg(handle,
417 TI_SCI_MSG_BOARD_CONFIG_PM,
418 addr, size);
419}
420
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530421/**
422 * ti_sci_set_device_state() - Set device state helper
423 * @handle: pointer to TI SCI handle
424 * @id: Device identifier
425 * @flags: flags to setup for the device
426 * @state: State to move the device to
427 *
428 * Return: 0 if all went well, else returns appropriate error value.
429 */
430static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
431 u32 id, u32 flags, u8 state)
432{
433 struct ti_sci_msg_req_set_device_state req;
434 struct ti_sci_msg_hdr *resp;
435 struct ti_sci_info *info;
436 struct ti_sci_xfer *xfer;
437 int ret = 0;
438
439 if (IS_ERR(handle))
440 return PTR_ERR(handle);
441 if (!handle)
442 return -EINVAL;
443
444 info = handle_to_ti_sci_info(handle);
445
446 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
447 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
448 (u32 *)&req, sizeof(req), sizeof(*resp));
449 if (IS_ERR(xfer)) {
450 ret = PTR_ERR(xfer);
451 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
452 return ret;
453 }
454 req.id = id;
455 req.state = state;
456
457 ret = ti_sci_do_xfer(info, xfer);
458 if (ret) {
459 dev_err(info->dev, "Mbox send fail %d\n", ret);
460 return ret;
461 }
462
463 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
464
465 if (!ti_sci_is_response_ack(resp))
466 return -ENODEV;
467
468 return ret;
469}
470
471/**
472 * ti_sci_get_device_state() - Get device state helper
473 * @handle: Handle to the device
474 * @id: Device Identifier
475 * @clcnt: Pointer to Context Loss Count
476 * @resets: pointer to resets
477 * @p_state: pointer to p_state
478 * @c_state: pointer to c_state
479 *
480 * Return: 0 if all went fine, else return appropriate error.
481 */
482static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
483 u32 id, u32 *clcnt, u32 *resets,
484 u8 *p_state, u8 *c_state)
485{
486 struct ti_sci_msg_resp_get_device_state *resp;
487 struct ti_sci_msg_req_get_device_state req;
488 struct ti_sci_info *info;
489 struct ti_sci_xfer *xfer;
490 int ret = 0;
491
492 if (IS_ERR(handle))
493 return PTR_ERR(handle);
494 if (!handle)
495 return -EINVAL;
496
497 if (!clcnt && !resets && !p_state && !c_state)
498 return -EINVAL;
499
500 info = handle_to_ti_sci_info(handle);
501
502 /* Response is expected, so need of any flags */
503 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 0,
504 (u32 *)&req, sizeof(req), sizeof(*resp));
505 if (IS_ERR(xfer)) {
506 ret = PTR_ERR(xfer);
507 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
508 return ret;
509 }
510 req.id = id;
511
512 ret = ti_sci_do_xfer(info, xfer);
513 if (ret) {
514 dev_err(dev, "Mbox send fail %d\n", ret);
515 return ret;
516 }
517
518 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
519 if (!ti_sci_is_response_ack(resp))
520 return -ENODEV;
521
522 if (clcnt)
523 *clcnt = resp->context_loss_count;
524 if (resets)
525 *resets = resp->resets;
526 if (p_state)
527 *p_state = resp->programmed_state;
528 if (c_state)
529 *c_state = resp->current_state;
530
531 return ret;
532}
533
534/**
535 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
536 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
537 * @id: Device Identifier
538 *
539 * Request for the device - NOTE: the client MUST maintain integrity of
540 * usage count by balancing get_device with put_device. No refcounting is
541 * managed by driver for that purpose.
542 *
543 * NOTE: The request is for exclusive access for the processor.
544 *
545 * Return: 0 if all went fine, else return appropriate error.
546 */
547static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
548{
549 return ti_sci_set_device_state(handle, id,
550 MSG_FLAG_DEVICE_EXCLUSIVE,
551 MSG_DEVICE_SW_STATE_ON);
552}
553
554/**
555 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
556 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
557 * @id: Device Identifier
558 *
559 * Request for the device - NOTE: the client MUST maintain integrity of
560 * usage count by balancing get_device with put_device. No refcounting is
561 * managed by driver for that purpose.
562 *
563 * Return: 0 if all went fine, else return appropriate error.
564 */
565static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
566{
567 return ti_sci_set_device_state(handle, id,
568 MSG_FLAG_DEVICE_EXCLUSIVE,
569 MSG_DEVICE_SW_STATE_RETENTION);
570}
571
572/**
573 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
574 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
575 * @id: Device Identifier
576 *
577 * Request for the device - NOTE: the client MUST maintain integrity of
578 * usage count by balancing get_device with put_device. No refcounting is
579 * managed by driver for that purpose.
580 *
581 * Return: 0 if all went fine, else return appropriate error.
582 */
583static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
584{
585 return ti_sci_set_device_state(handle, id,
586 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
587}
588
589/**
590 * ti_sci_cmd_dev_is_valid() - Is the device valid
591 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
592 * @id: Device Identifier
593 *
594 * Return: 0 if all went fine and the device ID is valid, else return
595 * appropriate error.
596 */
597static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
598{
599 u8 unused;
600
601 /* check the device state which will also tell us if the ID is valid */
602 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
603}
604
605/**
606 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
607 * @handle: Pointer to TISCI handle
608 * @id: Device Identifier
609 * @count: Pointer to Context Loss counter to populate
610 *
611 * Return: 0 if all went fine, else return appropriate error.
612 */
613static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
614 u32 *count)
615{
616 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
617}
618
619/**
620 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
621 * @handle: Pointer to TISCI handle
622 * @id: Device Identifier
623 * @r_state: true if requested to be idle
624 *
625 * Return: 0 if all went fine, else return appropriate error.
626 */
627static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
628 bool *r_state)
629{
630 int ret;
631 u8 state;
632
633 if (!r_state)
634 return -EINVAL;
635
636 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
637 if (ret)
638 return ret;
639
640 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
641
642 return 0;
643}
644
645/**
646 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
647 * @handle: Pointer to TISCI handle
648 * @id: Device Identifier
649 * @r_state: true if requested to be stopped
650 * @curr_state: true if currently stopped.
651 *
652 * Return: 0 if all went fine, else return appropriate error.
653 */
654static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
655 bool *r_state, bool *curr_state)
656{
657 int ret;
658 u8 p_state, c_state;
659
660 if (!r_state && !curr_state)
661 return -EINVAL;
662
663 ret =
664 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
665 if (ret)
666 return ret;
667
668 if (r_state)
669 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
670 if (curr_state)
671 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
672
673 return 0;
674}
675
676/**
677 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
678 * @handle: Pointer to TISCI handle
679 * @id: Device Identifier
680 * @r_state: true if requested to be ON
681 * @curr_state: true if currently ON and active
682 *
683 * Return: 0 if all went fine, else return appropriate error.
684 */
685static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
686 bool *r_state, bool *curr_state)
687{
688 int ret;
689 u8 p_state, c_state;
690
691 if (!r_state && !curr_state)
692 return -EINVAL;
693
694 ret =
695 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
696 if (ret)
697 return ret;
698
699 if (r_state)
700 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
701 if (curr_state)
702 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
703
704 return 0;
705}
706
707/**
708 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
709 * @handle: Pointer to TISCI handle
710 * @id: Device Identifier
711 * @curr_state: true if currently transitioning.
712 *
713 * Return: 0 if all went fine, else return appropriate error.
714 */
715static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
716 bool *curr_state)
717{
718 int ret;
719 u8 state;
720
721 if (!curr_state)
722 return -EINVAL;
723
724 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
725 if (ret)
726 return ret;
727
728 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
729
730 return 0;
731}
732
733/**
734 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
735 * by TISCI
736 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
737 * @id: Device Identifier
738 * @reset_state: Device specific reset bit field
739 *
740 * Return: 0 if all went fine, else return appropriate error.
741 */
742static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
743 u32 id, u32 reset_state)
744{
745 struct ti_sci_msg_req_set_device_resets req;
746 struct ti_sci_msg_hdr *resp;
747 struct ti_sci_info *info;
748 struct ti_sci_xfer *xfer;
749 int ret = 0;
750
751 if (IS_ERR(handle))
752 return PTR_ERR(handle);
753 if (!handle)
754 return -EINVAL;
755
756 info = handle_to_ti_sci_info(handle);
757
758 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
759 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
760 (u32 *)&req, sizeof(req), sizeof(*resp));
761 if (IS_ERR(xfer)) {
762 ret = PTR_ERR(xfer);
763 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
764 return ret;
765 }
766 req.id = id;
767 req.resets = reset_state;
768
769 ret = ti_sci_do_xfer(info, xfer);
770 if (ret) {
771 dev_err(info->dev, "Mbox send fail %d\n", ret);
772 return ret;
773 }
774
775 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
776
777 if (!ti_sci_is_response_ack(resp))
778 return -ENODEV;
779
780 return ret;
781}
782
783/**
784 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
785 * by TISCI
786 * @handle: Pointer to TISCI handle
787 * @id: Device Identifier
788 * @reset_state: Pointer to reset state to populate
789 *
790 * Return: 0 if all went fine, else return appropriate error.
791 */
792static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
793 u32 id, u32 *reset_state)
794{
795 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
796 NULL);
797}
798
Lokesh Vutla9b871812018-08-27 15:57:35 +0530799/**
800 * ti_sci_set_clock_state() - Set clock state helper
801 * @handle: pointer to TI SCI handle
802 * @dev_id: Device identifier this request is for
803 * @clk_id: Clock identifier for the device for this request.
804 * Each device has it's own set of clock inputs. This indexes
805 * which clock input to modify.
806 * @flags: Header flags as needed
807 * @state: State to request for the clock.
808 *
809 * Return: 0 if all went well, else returns appropriate error value.
810 */
811static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
812 u32 dev_id, u8 clk_id,
813 u32 flags, u8 state)
814{
815 struct ti_sci_msg_req_set_clock_state req;
816 struct ti_sci_msg_hdr *resp;
817 struct ti_sci_info *info;
818 struct ti_sci_xfer *xfer;
819 int ret = 0;
820
821 if (IS_ERR(handle))
822 return PTR_ERR(handle);
823 if (!handle)
824 return -EINVAL;
825
826 info = handle_to_ti_sci_info(handle);
827
828 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
829 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
830 (u32 *)&req, sizeof(req), sizeof(*resp));
831 if (IS_ERR(xfer)) {
832 ret = PTR_ERR(xfer);
833 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
834 return ret;
835 }
836 req.dev_id = dev_id;
837 req.clk_id = clk_id;
838 req.request_state = state;
839
840 ret = ti_sci_do_xfer(info, xfer);
841 if (ret) {
842 dev_err(info->dev, "Mbox send fail %d\n", ret);
843 return ret;
844 }
845
846 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
847
848 if (!ti_sci_is_response_ack(resp))
849 return -ENODEV;
850
851 return ret;
852}
853
854/**
855 * ti_sci_cmd_get_clock_state() - Get clock state helper
856 * @handle: pointer to TI SCI handle
857 * @dev_id: Device identifier this request is for
858 * @clk_id: Clock identifier for the device for this request.
859 * Each device has it's own set of clock inputs. This indexes
860 * which clock input to modify.
861 * @programmed_state: State requested for clock to move to
862 * @current_state: State that the clock is currently in
863 *
864 * Return: 0 if all went well, else returns appropriate error value.
865 */
866static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
867 u32 dev_id, u8 clk_id,
868 u8 *programmed_state, u8 *current_state)
869{
870 struct ti_sci_msg_resp_get_clock_state *resp;
871 struct ti_sci_msg_req_get_clock_state req;
872 struct ti_sci_info *info;
873 struct ti_sci_xfer *xfer;
874 int ret = 0;
875
876 if (IS_ERR(handle))
877 return PTR_ERR(handle);
878 if (!handle)
879 return -EINVAL;
880
881 if (!programmed_state && !current_state)
882 return -EINVAL;
883
884 info = handle_to_ti_sci_info(handle);
885
886 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
887 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
888 (u32 *)&req, sizeof(req), sizeof(*resp));
889 if (IS_ERR(xfer)) {
890 ret = PTR_ERR(xfer);
891 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
892 return ret;
893 }
894 req.dev_id = dev_id;
895 req.clk_id = clk_id;
896
897 ret = ti_sci_do_xfer(info, xfer);
898 if (ret) {
899 dev_err(info->dev, "Mbox send fail %d\n", ret);
900 return ret;
901 }
902
903 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
904
905 if (!ti_sci_is_response_ack(resp))
906 return -ENODEV;
907
908 if (programmed_state)
909 *programmed_state = resp->programmed_state;
910 if (current_state)
911 *current_state = resp->current_state;
912
913 return ret;
914}
915
916/**
917 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
918 * @handle: pointer to TI SCI handle
919 * @dev_id: Device identifier this request is for
920 * @clk_id: Clock identifier for the device for this request.
921 * Each device has it's own set of clock inputs. This indexes
922 * which clock input to modify.
923 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
924 * @can_change_freq: 'true' if frequency change is desired, else 'false'
925 * @enable_input_term: 'true' if input termination is desired, else 'false'
926 *
927 * Return: 0 if all went well, else returns appropriate error value.
928 */
929static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
930 u8 clk_id, bool needs_ssc, bool can_change_freq,
931 bool enable_input_term)
932{
933 u32 flags = 0;
934
935 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
936 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
937 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
938
939 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
940 MSG_CLOCK_SW_STATE_REQ);
941}
942
943/**
944 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
945 * @handle: pointer to TI SCI handle
946 * @dev_id: Device identifier this request is for
947 * @clk_id: Clock identifier for the device for this request.
948 * Each device has it's own set of clock inputs. This indexes
949 * which clock input to modify.
950 *
951 * NOTE: This clock must have been requested by get_clock previously.
952 *
953 * Return: 0 if all went well, else returns appropriate error value.
954 */
955static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
956 u32 dev_id, u8 clk_id)
957{
958 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
959 MSG_CLOCK_SW_STATE_UNREQ);
960}
961
962/**
963 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
964 * @handle: pointer to TI SCI handle
965 * @dev_id: Device identifier this request is for
966 * @clk_id: Clock identifier for the device for this request.
967 * Each device has it's own set of clock inputs. This indexes
968 * which clock input to modify.
969 *
970 * NOTE: This clock must have been requested by get_clock previously.
971 *
972 * Return: 0 if all went well, else returns appropriate error value.
973 */
974static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
975 u32 dev_id, u8 clk_id)
976{
977 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
978 MSG_CLOCK_SW_STATE_AUTO);
979}
980
981/**
982 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
983 * @handle: pointer to TI SCI handle
984 * @dev_id: Device identifier this request is for
985 * @clk_id: Clock identifier for the device for this request.
986 * Each device has it's own set of clock inputs. This indexes
987 * which clock input to modify.
988 * @req_state: state indicating if the clock is auto managed
989 *
990 * Return: 0 if all went well, else returns appropriate error value.
991 */
992static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
993 u32 dev_id, u8 clk_id, bool *req_state)
994{
995 u8 state = 0;
996 int ret;
997
998 if (!req_state)
999 return -EINVAL;
1000
1001 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1002 if (ret)
1003 return ret;
1004
1005 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1006 return 0;
1007}
1008
1009/**
1010 * ti_sci_cmd_clk_is_on() - Is the clock ON
1011 * @handle: pointer to TI SCI handle
1012 * @dev_id: Device identifier this request is for
1013 * @clk_id: Clock identifier for the device for this request.
1014 * Each device has it's own set of clock inputs. This indexes
1015 * which clock input to modify.
1016 * @req_state: state indicating if the clock is managed by us and enabled
1017 * @curr_state: state indicating if the clock is ready for operation
1018 *
1019 * Return: 0 if all went well, else returns appropriate error value.
1020 */
1021static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1022 u8 clk_id, bool *req_state, bool *curr_state)
1023{
1024 u8 c_state = 0, r_state = 0;
1025 int ret;
1026
1027 if (!req_state && !curr_state)
1028 return -EINVAL;
1029
1030 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1031 &r_state, &c_state);
1032 if (ret)
1033 return ret;
1034
1035 if (req_state)
1036 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1037 if (curr_state)
1038 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1039 return 0;
1040}
1041
1042/**
1043 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1044 * @handle: pointer to TI SCI handle
1045 * @dev_id: Device identifier this request is for
1046 * @clk_id: Clock identifier for the device for this request.
1047 * Each device has it's own set of clock inputs. This indexes
1048 * which clock input to modify.
1049 * @req_state: state indicating if the clock is managed by us and disabled
1050 * @curr_state: state indicating if the clock is NOT ready for operation
1051 *
1052 * Return: 0 if all went well, else returns appropriate error value.
1053 */
1054static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1055 u8 clk_id, bool *req_state, bool *curr_state)
1056{
1057 u8 c_state = 0, r_state = 0;
1058 int ret;
1059
1060 if (!req_state && !curr_state)
1061 return -EINVAL;
1062
1063 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1064 &r_state, &c_state);
1065 if (ret)
1066 return ret;
1067
1068 if (req_state)
1069 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1070 if (curr_state)
1071 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1072 return 0;
1073}
1074
1075/**
1076 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1077 * @handle: pointer to TI SCI handle
1078 * @dev_id: Device identifier this request is for
1079 * @clk_id: Clock identifier for the device for this request.
1080 * Each device has it's own set of clock inputs. This indexes
1081 * which clock input to modify.
1082 * @parent_id: Parent clock identifier to set
1083 *
1084 * Return: 0 if all went well, else returns appropriate error value.
1085 */
1086static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1087 u32 dev_id, u8 clk_id, u8 parent_id)
1088{
1089 struct ti_sci_msg_req_set_clock_parent req;
1090 struct ti_sci_msg_hdr *resp;
1091 struct ti_sci_info *info;
1092 struct ti_sci_xfer *xfer;
1093 int ret = 0;
1094
1095 if (IS_ERR(handle))
1096 return PTR_ERR(handle);
1097 if (!handle)
1098 return -EINVAL;
1099
1100 info = handle_to_ti_sci_info(handle);
1101
1102 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1103 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1104 (u32 *)&req, sizeof(req), sizeof(*resp));
1105 if (IS_ERR(xfer)) {
1106 ret = PTR_ERR(xfer);
1107 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1108 return ret;
1109 }
1110 req.dev_id = dev_id;
1111 req.clk_id = clk_id;
1112 req.parent_id = parent_id;
1113
1114 ret = ti_sci_do_xfer(info, xfer);
1115 if (ret) {
1116 dev_err(info->dev, "Mbox send fail %d\n", ret);
1117 return ret;
1118 }
1119
1120 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1121
1122 if (!ti_sci_is_response_ack(resp))
1123 return -ENODEV;
1124
1125 return ret;
1126}
1127
1128/**
1129 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1130 * @handle: pointer to TI SCI handle
1131 * @dev_id: Device identifier this request is for
1132 * @clk_id: Clock identifier for the device for this request.
1133 * Each device has it's own set of clock inputs. This indexes
1134 * which clock input to modify.
1135 * @parent_id: Current clock parent
1136 *
1137 * Return: 0 if all went well, else returns appropriate error value.
1138 */
1139static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1140 u32 dev_id, u8 clk_id, u8 *parent_id)
1141{
1142 struct ti_sci_msg_resp_get_clock_parent *resp;
1143 struct ti_sci_msg_req_get_clock_parent req;
1144 struct ti_sci_info *info;
1145 struct ti_sci_xfer *xfer;
1146 int ret = 0;
1147
1148 if (IS_ERR(handle))
1149 return PTR_ERR(handle);
1150 if (!handle || !parent_id)
1151 return -EINVAL;
1152
1153 info = handle_to_ti_sci_info(handle);
1154
1155 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1156 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1157 (u32 *)&req, sizeof(req), sizeof(*resp));
1158 if (IS_ERR(xfer)) {
1159 ret = PTR_ERR(xfer);
1160 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1161 return ret;
1162 }
1163 req.dev_id = dev_id;
1164 req.clk_id = clk_id;
1165
1166 ret = ti_sci_do_xfer(info, xfer);
1167 if (ret) {
1168 dev_err(info->dev, "Mbox send fail %d\n", ret);
1169 return ret;
1170 }
1171
1172 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1173
1174 if (!ti_sci_is_response_ack(resp))
1175 ret = -ENODEV;
1176 else
1177 *parent_id = resp->parent_id;
1178
1179 return ret;
1180}
1181
1182/**
1183 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1184 * @handle: pointer to TI SCI handle
1185 * @dev_id: Device identifier this request is for
1186 * @clk_id: Clock identifier for the device for this request.
1187 * Each device has it's own set of clock inputs. This indexes
1188 * which clock input to modify.
1189 * @num_parents: Returns he number of parents to the current clock.
1190 *
1191 * Return: 0 if all went well, else returns appropriate error value.
1192 */
1193static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1194 u32 dev_id, u8 clk_id,
1195 u8 *num_parents)
1196{
1197 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1198 struct ti_sci_msg_req_get_clock_num_parents req;
1199 struct ti_sci_info *info;
1200 struct ti_sci_xfer *xfer;
1201 int ret = 0;
1202
1203 if (IS_ERR(handle))
1204 return PTR_ERR(handle);
1205 if (!handle || !num_parents)
1206 return -EINVAL;
1207
1208 info = handle_to_ti_sci_info(handle);
1209
1210 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1211 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1212 (u32 *)&req, sizeof(req), sizeof(*resp));
1213 if (IS_ERR(xfer)) {
1214 ret = PTR_ERR(xfer);
1215 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1216 return ret;
1217 }
1218 req.dev_id = dev_id;
1219 req.clk_id = clk_id;
1220
1221 ret = ti_sci_do_xfer(info, xfer);
1222 if (ret) {
1223 dev_err(info->dev, "Mbox send fail %d\n", ret);
1224 return ret;
1225 }
1226
1227 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1228 xfer->tx_message.buf;
1229
1230 if (!ti_sci_is_response_ack(resp))
1231 ret = -ENODEV;
1232 else
1233 *num_parents = resp->num_parents;
1234
1235 return ret;
1236}
1237
1238/**
1239 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1240 * @handle: pointer to TI SCI handle
1241 * @dev_id: Device identifier this request is for
1242 * @clk_id: Clock identifier for the device for this request.
1243 * Each device has it's own set of clock inputs. This indexes
1244 * which clock input to modify.
1245 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1246 * allowable programmed frequency and does not account for clock
1247 * tolerances and jitter.
1248 * @target_freq: The target clock frequency in Hz. A frequency will be
1249 * processed as close to this target frequency as possible.
1250 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1251 * allowable programmed frequency and does not account for clock
1252 * tolerances and jitter.
1253 * @match_freq: Frequency match in Hz response.
1254 *
1255 * Return: 0 if all went well, else returns appropriate error value.
1256 */
1257static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1258 u32 dev_id, u8 clk_id, u64 min_freq,
1259 u64 target_freq, u64 max_freq,
1260 u64 *match_freq)
1261{
1262 struct ti_sci_msg_resp_query_clock_freq *resp;
1263 struct ti_sci_msg_req_query_clock_freq req;
1264 struct ti_sci_info *info;
1265 struct ti_sci_xfer *xfer;
1266 int ret = 0;
1267
1268 if (IS_ERR(handle))
1269 return PTR_ERR(handle);
1270 if (!handle || !match_freq)
1271 return -EINVAL;
1272
1273 info = handle_to_ti_sci_info(handle);
1274
1275 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1276 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1277 (u32 *)&req, sizeof(req), sizeof(*resp));
1278 if (IS_ERR(xfer)) {
1279 ret = PTR_ERR(xfer);
1280 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1281 return ret;
1282 }
1283 req.dev_id = dev_id;
1284 req.clk_id = clk_id;
1285 req.min_freq_hz = min_freq;
1286 req.target_freq_hz = target_freq;
1287 req.max_freq_hz = max_freq;
1288
1289 ret = ti_sci_do_xfer(info, xfer);
1290 if (ret) {
1291 dev_err(info->dev, "Mbox send fail %d\n", ret);
1292 return ret;
1293 }
1294
1295 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1296
1297 if (!ti_sci_is_response_ack(resp))
1298 ret = -ENODEV;
1299 else
1300 *match_freq = resp->freq_hz;
1301
1302 return ret;
1303}
1304
1305/**
1306 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1307 * @handle: pointer to TI SCI handle
1308 * @dev_id: Device identifier this request is for
1309 * @clk_id: Clock identifier for the device for this request.
1310 * Each device has it's own set of clock inputs. This indexes
1311 * which clock input to modify.
1312 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1313 * allowable programmed frequency and does not account for clock
1314 * tolerances and jitter.
1315 * @target_freq: The target clock frequency in Hz. A frequency will be
1316 * processed as close to this target frequency as possible.
1317 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1318 * allowable programmed frequency and does not account for clock
1319 * tolerances and jitter.
1320 *
1321 * Return: 0 if all went well, else returns appropriate error value.
1322 */
1323static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1324 u32 dev_id, u8 clk_id, u64 min_freq,
1325 u64 target_freq, u64 max_freq)
1326{
1327 struct ti_sci_msg_req_set_clock_freq req;
1328 struct ti_sci_msg_hdr *resp;
1329 struct ti_sci_info *info;
1330 struct ti_sci_xfer *xfer;
1331 int ret = 0;
1332
1333 if (IS_ERR(handle))
1334 return PTR_ERR(handle);
1335 if (!handle)
1336 return -EINVAL;
1337
1338 info = handle_to_ti_sci_info(handle);
1339
1340 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1341 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1342 (u32 *)&req, sizeof(req), sizeof(*resp));
1343 if (IS_ERR(xfer)) {
1344 ret = PTR_ERR(xfer);
1345 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1346 return ret;
1347 }
1348 req.dev_id = dev_id;
1349 req.clk_id = clk_id;
1350 req.min_freq_hz = min_freq;
1351 req.target_freq_hz = target_freq;
1352 req.max_freq_hz = max_freq;
1353
1354 ret = ti_sci_do_xfer(info, xfer);
1355 if (ret) {
1356 dev_err(info->dev, "Mbox send fail %d\n", ret);
1357 return ret;
1358 }
1359
1360 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1361
1362 if (!ti_sci_is_response_ack(resp))
1363 return -ENODEV;
1364
1365 return ret;
1366}
1367
1368/**
1369 * ti_sci_cmd_clk_get_freq() - Get current frequency
1370 * @handle: pointer to TI SCI handle
1371 * @dev_id: Device identifier this request is for
1372 * @clk_id: Clock identifier for the device for this request.
1373 * Each device has it's own set of clock inputs. This indexes
1374 * which clock input to modify.
1375 * @freq: Currently frequency in Hz
1376 *
1377 * Return: 0 if all went well, else returns appropriate error value.
1378 */
1379static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1380 u32 dev_id, u8 clk_id, u64 *freq)
1381{
1382 struct ti_sci_msg_resp_get_clock_freq *resp;
1383 struct ti_sci_msg_req_get_clock_freq req;
1384 struct ti_sci_info *info;
1385 struct ti_sci_xfer *xfer;
1386 int ret = 0;
1387
1388 if (IS_ERR(handle))
1389 return PTR_ERR(handle);
1390 if (!handle || !freq)
1391 return -EINVAL;
1392
1393 info = handle_to_ti_sci_info(handle);
1394
1395 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1396 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1397 (u32 *)&req, sizeof(req), sizeof(*resp));
1398 if (IS_ERR(xfer)) {
1399 ret = PTR_ERR(xfer);
1400 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1401 return ret;
1402 }
1403 req.dev_id = dev_id;
1404 req.clk_id = clk_id;
1405
1406 ret = ti_sci_do_xfer(info, xfer);
1407 if (ret) {
1408 dev_err(info->dev, "Mbox send fail %d\n", ret);
1409 return ret;
1410 }
1411
1412 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1413
1414 if (!ti_sci_is_response_ack(resp))
1415 ret = -ENODEV;
1416 else
1417 *freq = resp->freq_hz;
1418
1419 return ret;
1420}
1421
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301422/**
1423 * ti_sci_cmd_core_reboot() - Command to request system reset
1424 * @handle: pointer to TI SCI handle
1425 *
1426 * Return: 0 if all went well, else returns appropriate error value.
1427 */
1428static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1429{
1430 struct ti_sci_msg_req_reboot req;
1431 struct ti_sci_msg_hdr *resp;
1432 struct ti_sci_info *info;
1433 struct ti_sci_xfer *xfer;
1434 int ret = 0;
1435
1436 if (IS_ERR(handle))
1437 return PTR_ERR(handle);
1438 if (!handle)
1439 return -EINVAL;
1440
1441 info = handle_to_ti_sci_info(handle);
1442
1443 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1444 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1445 (u32 *)&req, sizeof(req), sizeof(*resp));
1446 if (IS_ERR(xfer)) {
1447 ret = PTR_ERR(xfer);
1448 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1449 return ret;
1450 }
1451
1452 ret = ti_sci_do_xfer(info, xfer);
1453 if (ret) {
1454 dev_err(dev, "Mbox send fail %d\n", ret);
1455 return ret;
1456 }
1457
1458 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1459
1460 if (!ti_sci_is_response_ack(resp))
1461 return -ENODEV;
1462
1463 return ret;
1464}
1465
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301466static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1467 u16 *type)
1468{
1469 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1470 bool found = false;
1471 int i;
1472
1473 /* If map is not provided then assume dev_id is used as type */
1474 if (!rm_type_map) {
1475 *type = dev_id;
1476 return 0;
1477 }
1478
1479 for (i = 0; rm_type_map[i].dev_id; i++) {
1480 if (rm_type_map[i].dev_id == dev_id) {
1481 *type = rm_type_map[i].type;
1482 found = true;
1483 break;
1484 }
1485 }
1486
1487 if (!found)
1488 return -EINVAL;
1489
1490 return 0;
1491}
1492
1493/**
1494 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1495 * to a host. Resource is uniquely identified by
1496 * type and subtype.
1497 * @handle: Pointer to TISCI handle.
1498 * @dev_id: TISCI device ID.
1499 * @subtype: Resource assignment subtype that is being requested
1500 * from the given device.
1501 * @s_host: Host processor ID to which the resources are allocated
1502 * @range_start: Start index of the resource range
1503 * @range_num: Number of resources in the range
1504 *
1505 * Return: 0 if all went fine, else return appropriate error.
1506 */
1507static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1508 u32 dev_id, u8 subtype, u8 s_host,
1509 u16 *range_start, u16 *range_num)
1510{
1511 struct ti_sci_msg_resp_get_resource_range *resp;
1512 struct ti_sci_msg_req_get_resource_range req;
1513 struct ti_sci_xfer *xfer;
1514 struct ti_sci_info *info;
1515 u16 type;
1516 int ret = 0;
1517
1518 if (IS_ERR(handle))
1519 return PTR_ERR(handle);
1520 if (!handle)
1521 return -EINVAL;
1522
1523 info = handle_to_ti_sci_info(handle);
1524
1525 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1526 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1527 (u32 *)&req, sizeof(req), sizeof(*resp));
1528 if (IS_ERR(xfer)) {
1529 ret = PTR_ERR(xfer);
1530 dev_err(dev, "Message alloc failed(%d)\n", ret);
1531 return ret;
1532 }
1533
1534 ret = ti_sci_get_resource_type(info, dev_id, &type);
1535 if (ret) {
1536 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1537 goto fail;
1538 }
1539
1540 req.secondary_host = s_host;
1541 req.type = type & MSG_RM_RESOURCE_TYPE_MASK;
1542 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1543
1544 ret = ti_sci_do_xfer(info, xfer);
1545 if (ret) {
1546 dev_err(dev, "Mbox send fail %d\n", ret);
1547 goto fail;
1548 }
1549
1550 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1551 if (!ti_sci_is_response_ack(resp)) {
1552 ret = -ENODEV;
1553 } else if (!resp->range_start && !resp->range_num) {
1554 ret = -ENODEV;
1555 } else {
1556 *range_start = resp->range_start;
1557 *range_num = resp->range_num;
1558 };
1559
1560fail:
1561 return ret;
1562}
1563
1564/**
1565 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1566 * that is same as ti sci interface host.
1567 * @handle: Pointer to TISCI handle.
1568 * @dev_id: TISCI device ID.
1569 * @subtype: Resource assignment subtype that is being requested
1570 * from the given device.
1571 * @range_start: Start index of the resource range
1572 * @range_num: Number of resources in the range
1573 *
1574 * Return: 0 if all went fine, else return appropriate error.
1575 */
1576static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1577 u32 dev_id, u8 subtype,
1578 u16 *range_start, u16 *range_num)
1579{
1580 return ti_sci_get_resource_range(handle, dev_id, subtype,
1581 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1582 range_start, range_num);
1583}
1584
1585/**
1586 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1587 * assigned to a specified host.
1588 * @handle: Pointer to TISCI handle.
1589 * @dev_id: TISCI device ID.
1590 * @subtype: Resource assignment subtype that is being requested
1591 * from the given device.
1592 * @s_host: Host processor ID to which the resources are allocated
1593 * @range_start: Start index of the resource range
1594 * @range_num: Number of resources in the range
1595 *
1596 * Return: 0 if all went fine, else return appropriate error.
1597 */
1598static
1599int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1600 u32 dev_id, u8 subtype, u8 s_host,
1601 u16 *range_start, u16 *range_num)
1602{
1603 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1604 range_start, range_num);
1605}
1606
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301607/**
Lokesh Vutla826eb742019-03-08 11:47:32 +05301608 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1609 * @handle: pointer to TI SCI handle
1610 * @msms_start: MSMC start as returned by tisci
1611 * @msmc_end: MSMC end as returned by tisci
1612 *
1613 * Return: 0 if all went well, else returns appropriate error value.
1614 */
1615static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1616 u64 *msmc_start, u64 *msmc_end)
1617{
1618 struct ti_sci_msg_resp_query_msmc *resp;
1619 struct ti_sci_msg_hdr req;
1620 struct ti_sci_info *info;
1621 struct ti_sci_xfer *xfer;
1622 int ret = 0;
1623
1624 if (IS_ERR(handle))
1625 return PTR_ERR(handle);
1626 if (!handle)
1627 return -EINVAL;
1628
1629 info = handle_to_ti_sci_info(handle);
1630
1631 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1632 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1633 (u32 *)&req, sizeof(req), sizeof(*resp));
1634 if (IS_ERR(xfer)) {
1635 ret = PTR_ERR(xfer);
1636 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1637 return ret;
1638 }
1639
1640 ret = ti_sci_do_xfer(info, xfer);
1641 if (ret) {
1642 dev_err(dev, "Mbox send fail %d\n", ret);
1643 return ret;
1644 }
1645
1646 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1647
1648 if (!ti_sci_is_response_ack(resp))
1649 return -ENODEV;
1650
1651 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1652 resp->msmc_start_low;
1653 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1654 resp->msmc_end_low;
1655
1656 return ret;
1657}
1658
1659/**
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301660 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1661 * @handle: Pointer to TI SCI handle
1662 * @proc_id: Processor ID this request is for
1663 *
1664 * Return: 0 if all went well, else returns appropriate error value.
1665 */
1666static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1667 u8 proc_id)
1668{
1669 struct ti_sci_msg_req_proc_request req;
1670 struct ti_sci_msg_hdr *resp;
1671 struct ti_sci_info *info;
1672 struct ti_sci_xfer *xfer;
1673 int ret = 0;
1674
1675 if (IS_ERR(handle))
1676 return PTR_ERR(handle);
1677 if (!handle)
1678 return -EINVAL;
1679
1680 info = handle_to_ti_sci_info(handle);
1681
1682 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1683 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1684 (u32 *)&req, sizeof(req), sizeof(*resp));
1685 if (IS_ERR(xfer)) {
1686 ret = PTR_ERR(xfer);
1687 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1688 return ret;
1689 }
1690 req.processor_id = proc_id;
1691
1692 ret = ti_sci_do_xfer(info, xfer);
1693 if (ret) {
1694 dev_err(info->dev, "Mbox send fail %d\n", ret);
1695 return ret;
1696 }
1697
1698 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1699
1700 if (!ti_sci_is_response_ack(resp))
1701 ret = -ENODEV;
1702
1703 return ret;
1704}
1705
1706/**
1707 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1708 * @handle: Pointer to TI SCI handle
1709 * @proc_id: Processor ID this request is for
1710 *
1711 * Return: 0 if all went well, else returns appropriate error value.
1712 */
1713static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1714 u8 proc_id)
1715{
1716 struct ti_sci_msg_req_proc_release req;
1717 struct ti_sci_msg_hdr *resp;
1718 struct ti_sci_info *info;
1719 struct ti_sci_xfer *xfer;
1720 int ret = 0;
1721
1722 if (IS_ERR(handle))
1723 return PTR_ERR(handle);
1724 if (!handle)
1725 return -EINVAL;
1726
1727 info = handle_to_ti_sci_info(handle);
1728
1729 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1730 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1731 (u32 *)&req, sizeof(req), sizeof(*resp));
1732 if (IS_ERR(xfer)) {
1733 ret = PTR_ERR(xfer);
1734 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1735 return ret;
1736 }
1737 req.processor_id = proc_id;
1738
1739 ret = ti_sci_do_xfer(info, xfer);
1740 if (ret) {
1741 dev_err(info->dev, "Mbox send fail %d\n", ret);
1742 return ret;
1743 }
1744
1745 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1746
1747 if (!ti_sci_is_response_ack(resp))
1748 ret = -ENODEV;
1749
1750 return ret;
1751}
1752
1753/**
1754 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1755 * control to a host in the processor's access
1756 * control list.
1757 * @handle: Pointer to TI SCI handle
1758 * @proc_id: Processor ID this request is for
1759 * @host_id: Host ID to get the control of the processor
1760 *
1761 * Return: 0 if all went well, else returns appropriate error value.
1762 */
1763static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1764 u8 proc_id, u8 host_id)
1765{
1766 struct ti_sci_msg_req_proc_handover req;
1767 struct ti_sci_msg_hdr *resp;
1768 struct ti_sci_info *info;
1769 struct ti_sci_xfer *xfer;
1770 int ret = 0;
1771
1772 if (IS_ERR(handle))
1773 return PTR_ERR(handle);
1774 if (!handle)
1775 return -EINVAL;
1776
1777 info = handle_to_ti_sci_info(handle);
1778
1779 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1780 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1781 (u32 *)&req, sizeof(req), sizeof(*resp));
1782 if (IS_ERR(xfer)) {
1783 ret = PTR_ERR(xfer);
1784 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1785 return ret;
1786 }
1787 req.processor_id = proc_id;
1788 req.host_id = host_id;
1789
1790 ret = ti_sci_do_xfer(info, xfer);
1791 if (ret) {
1792 dev_err(info->dev, "Mbox send fail %d\n", ret);
1793 return ret;
1794 }
1795
1796 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1797
1798 if (!ti_sci_is_response_ack(resp))
1799 ret = -ENODEV;
1800
1801 return ret;
1802}
1803
1804/**
1805 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1806 * configuration flags
1807 * @handle: Pointer to TI SCI handle
1808 * @proc_id: Processor ID this request is for
1809 * @config_flags_set: Configuration flags to be set
1810 * @config_flags_clear: Configuration flags to be cleared.
1811 *
1812 * Return: 0 if all went well, else returns appropriate error value.
1813 */
1814static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1815 u8 proc_id, u64 bootvector,
1816 u32 config_flags_set,
1817 u32 config_flags_clear)
1818{
1819 struct ti_sci_msg_req_set_proc_boot_config req;
1820 struct ti_sci_msg_hdr *resp;
1821 struct ti_sci_info *info;
1822 struct ti_sci_xfer *xfer;
1823 int ret = 0;
1824
1825 if (IS_ERR(handle))
1826 return PTR_ERR(handle);
1827 if (!handle)
1828 return -EINVAL;
1829
1830 info = handle_to_ti_sci_info(handle);
1831
1832 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1833 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1834 (u32 *)&req, sizeof(req), sizeof(*resp));
1835 if (IS_ERR(xfer)) {
1836 ret = PTR_ERR(xfer);
1837 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1838 return ret;
1839 }
1840 req.processor_id = proc_id;
1841 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1842 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1843 TISCI_ADDR_HIGH_SHIFT;
1844 req.config_flags_set = config_flags_set;
1845 req.config_flags_clear = config_flags_clear;
1846
1847 ret = ti_sci_do_xfer(info, xfer);
1848 if (ret) {
1849 dev_err(info->dev, "Mbox send fail %d\n", ret);
1850 return ret;
1851 }
1852
1853 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1854
1855 if (!ti_sci_is_response_ack(resp))
1856 ret = -ENODEV;
1857
1858 return ret;
1859}
1860
1861/**
1862 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1863 * control flags
1864 * @handle: Pointer to TI SCI handle
1865 * @proc_id: Processor ID this request is for
1866 * @control_flags_set: Control flags to be set
1867 * @control_flags_clear: Control flags to be cleared
1868 *
1869 * Return: 0 if all went well, else returns appropriate error value.
1870 */
1871static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1872 u8 proc_id, u32 control_flags_set,
1873 u32 control_flags_clear)
1874{
1875 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1876 struct ti_sci_msg_hdr *resp;
1877 struct ti_sci_info *info;
1878 struct ti_sci_xfer *xfer;
1879 int ret = 0;
1880
1881 if (IS_ERR(handle))
1882 return PTR_ERR(handle);
1883 if (!handle)
1884 return -EINVAL;
1885
1886 info = handle_to_ti_sci_info(handle);
1887
1888 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1889 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1890 (u32 *)&req, sizeof(req), sizeof(*resp));
1891 if (IS_ERR(xfer)) {
1892 ret = PTR_ERR(xfer);
1893 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1894 return ret;
1895 }
1896 req.processor_id = proc_id;
1897 req.control_flags_set = control_flags_set;
1898 req.control_flags_clear = control_flags_clear;
1899
1900 ret = ti_sci_do_xfer(info, xfer);
1901 if (ret) {
1902 dev_err(info->dev, "Mbox send fail %d\n", ret);
1903 return ret;
1904 }
1905
1906 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1907
1908 if (!ti_sci_is_response_ack(resp))
1909 ret = -ENODEV;
1910
1911 return ret;
1912}
1913
1914/**
1915 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1916 * image and then set the processor configuration flags.
1917 * @handle: Pointer to TI SCI handle
1918 * @proc_id: Processor ID this request is for
1919 * @cert_addr: Memory address at which payload image certificate is located.
1920 *
1921 * Return: 0 if all went well, else returns appropriate error value.
1922 */
1923static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
1924 u8 proc_id, u64 cert_addr)
1925{
1926 struct ti_sci_msg_req_proc_auth_boot_image req;
1927 struct ti_sci_msg_hdr *resp;
1928 struct ti_sci_info *info;
1929 struct ti_sci_xfer *xfer;
1930 int ret = 0;
1931
1932 if (IS_ERR(handle))
1933 return PTR_ERR(handle);
1934 if (!handle)
1935 return -EINVAL;
1936
1937 info = handle_to_ti_sci_info(handle);
1938
1939 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1940 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1941 (u32 *)&req, sizeof(req), sizeof(*resp));
1942 if (IS_ERR(xfer)) {
1943 ret = PTR_ERR(xfer);
1944 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1945 return ret;
1946 }
1947 req.processor_id = proc_id;
1948 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1949 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1950 TISCI_ADDR_HIGH_SHIFT;
1951
1952 ret = ti_sci_do_xfer(info, xfer);
1953 if (ret) {
1954 dev_err(info->dev, "Mbox send fail %d\n", ret);
1955 return ret;
1956 }
1957
1958 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1959
1960 if (!ti_sci_is_response_ack(resp))
1961 ret = -ENODEV;
1962
1963 return ret;
1964}
1965
1966/**
1967 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1968 * @handle: Pointer to TI SCI handle
1969 * @proc_id: Processor ID this request is for
1970 *
1971 * Return: 0 if all went well, else returns appropriate error value.
1972 */
1973static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1974 u8 proc_id, u64 *bv, u32 *cfg_flags,
1975 u32 *ctrl_flags, u32 *sts_flags)
1976{
1977 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1978 struct ti_sci_msg_req_get_proc_boot_status req;
1979 struct ti_sci_info *info;
1980 struct ti_sci_xfer *xfer;
1981 int ret = 0;
1982
1983 if (IS_ERR(handle))
1984 return PTR_ERR(handle);
1985 if (!handle)
1986 return -EINVAL;
1987
1988 info = handle_to_ti_sci_info(handle);
1989
1990 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1991 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1992 (u32 *)&req, sizeof(req), sizeof(*resp));
1993 if (IS_ERR(xfer)) {
1994 ret = PTR_ERR(xfer);
1995 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1996 return ret;
1997 }
1998 req.processor_id = proc_id;
1999
2000 ret = ti_sci_do_xfer(info, xfer);
2001 if (ret) {
2002 dev_err(info->dev, "Mbox send fail %d\n", ret);
2003 return ret;
2004 }
2005
2006 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2007 xfer->tx_message.buf;
2008
2009 if (!ti_sci_is_response_ack(resp))
2010 return -ENODEV;
2011 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2012 (((u64)resp->bootvector_high <<
2013 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2014 *cfg_flags = resp->config_flags;
2015 *ctrl_flags = resp->control_flags;
2016 *sts_flags = resp->status_flags;
2017
2018 return ret;
2019}
2020
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302021/**
2022 * ti_sci_cmd_ring_config() - configure RA ring
2023 * @handle: pointer to TI SCI handle
2024 * @valid_params: Bitfield defining validity of ring configuration parameters.
2025 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2026 * @index: Ring index.
2027 * @addr_lo: The ring base address lo 32 bits
2028 * @addr_hi: The ring base address hi 32 bits
2029 * @count: Number of ring elements.
2030 * @mode: The mode of the ring
2031 * @size: The ring element size.
2032 * @order_id: Specifies the ring's bus order ID.
2033 *
2034 * Return: 0 if all went well, else returns appropriate error value.
2035 *
2036 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2037 */
2038static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2039 u32 valid_params, u16 nav_id, u16 index,
2040 u32 addr_lo, u32 addr_hi, u32 count,
2041 u8 mode, u8 size, u8 order_id)
2042{
2043 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2044 struct ti_sci_msg_rm_ring_cfg_req req;
2045 struct ti_sci_xfer *xfer;
2046 struct ti_sci_info *info;
2047 int ret = 0;
2048
2049 if (IS_ERR(handle))
2050 return PTR_ERR(handle);
2051 if (!handle)
2052 return -EINVAL;
2053
2054 info = handle_to_ti_sci_info(handle);
2055
2056 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2057 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2058 (u32 *)&req, sizeof(req), sizeof(*resp));
2059 if (IS_ERR(xfer)) {
2060 ret = PTR_ERR(xfer);
2061 dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2062 return ret;
2063 }
2064 req.valid_params = valid_params;
2065 req.nav_id = nav_id;
2066 req.index = index;
2067 req.addr_lo = addr_lo;
2068 req.addr_hi = addr_hi;
2069 req.count = count;
2070 req.mode = mode;
2071 req.size = size;
2072 req.order_id = order_id;
2073
2074 ret = ti_sci_do_xfer(info, xfer);
2075 if (ret) {
2076 dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2077 goto fail;
2078 }
2079
2080 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2081
2082 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2083
2084fail:
2085 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2086 return ret;
2087}
2088
2089/**
2090 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2091 * @handle: pointer to TI SCI handle
2092 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2093 * @index: Ring index.
2094 * @addr_lo: returns ring's base address lo 32 bits
2095 * @addr_hi: returns ring's base address hi 32 bits
2096 * @count: returns number of ring elements.
2097 * @mode: returns mode of the ring
2098 * @size: returns ring element size.
2099 * @order_id: returns ring's bus order ID.
2100 *
2101 * Return: 0 if all went well, else returns appropriate error value.
2102 *
2103 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2104 */
2105static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2106 u32 nav_id, u32 index, u8 *mode,
2107 u32 *addr_lo, u32 *addr_hi,
2108 u32 *count, u8 *size, u8 *order_id)
2109{
2110 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2111 struct ti_sci_msg_rm_ring_get_cfg_req req;
2112 struct ti_sci_xfer *xfer;
2113 struct ti_sci_info *info;
2114 int ret = 0;
2115
2116 if (IS_ERR(handle))
2117 return PTR_ERR(handle);
2118 if (!handle)
2119 return -EINVAL;
2120
2121 info = handle_to_ti_sci_info(handle);
2122
2123 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2124 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2125 (u32 *)&req, sizeof(req), sizeof(*resp));
2126 if (IS_ERR(xfer)) {
2127 ret = PTR_ERR(xfer);
2128 dev_err(info->dev,
2129 "RM_RA:Message get config failed(%d)\n", ret);
2130 return ret;
2131 }
2132 req.nav_id = nav_id;
2133 req.index = index;
2134
2135 ret = ti_sci_do_xfer(info, xfer);
2136 if (ret) {
2137 dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2138 goto fail;
2139 }
2140
2141 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
2142
2143 if (!ti_sci_is_response_ack(resp)) {
2144 ret = -ENODEV;
2145 } else {
2146 if (mode)
2147 *mode = resp->mode;
2148 if (addr_lo)
2149 *addr_lo = resp->addr_lo;
2150 if (addr_hi)
2151 *addr_hi = resp->addr_hi;
2152 if (count)
2153 *count = resp->count;
2154 if (size)
2155 *size = resp->size;
2156 if (order_id)
2157 *order_id = resp->order_id;
2158 };
2159
2160fail:
2161 dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2162 return ret;
2163}
2164
2165static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2166 u32 nav_id, u32 src_thread, u32 dst_thread)
2167{
2168 struct ti_sci_msg_hdr *resp;
2169 struct ti_sci_msg_psil_pair req;
2170 struct ti_sci_xfer *xfer;
2171 struct ti_sci_info *info;
2172 int ret = 0;
2173
2174 if (IS_ERR(handle))
2175 return PTR_ERR(handle);
2176 if (!handle)
2177 return -EINVAL;
2178
2179 info = handle_to_ti_sci_info(handle);
2180
2181 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2182 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2183 (u32 *)&req, sizeof(req), sizeof(*resp));
2184 if (IS_ERR(xfer)) {
2185 ret = PTR_ERR(xfer);
2186 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2187 return ret;
2188 }
2189 req.nav_id = nav_id;
2190 req.src_thread = src_thread;
2191 req.dst_thread = dst_thread;
2192
2193 ret = ti_sci_do_xfer(info, xfer);
2194 if (ret) {
2195 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2196 goto fail;
2197 }
2198
2199 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2200 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2201
2202fail:
2203 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2204 nav_id, src_thread, dst_thread, ret);
2205 return ret;
2206}
2207
2208static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2209 u32 nav_id, u32 src_thread, u32 dst_thread)
2210{
2211 struct ti_sci_msg_hdr *resp;
2212 struct ti_sci_msg_psil_unpair req;
2213 struct ti_sci_xfer *xfer;
2214 struct ti_sci_info *info;
2215 int ret = 0;
2216
2217 if (IS_ERR(handle))
2218 return PTR_ERR(handle);
2219 if (!handle)
2220 return -EINVAL;
2221
2222 info = handle_to_ti_sci_info(handle);
2223
2224 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2225 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2226 (u32 *)&req, sizeof(req), sizeof(*resp));
2227 if (IS_ERR(xfer)) {
2228 ret = PTR_ERR(xfer);
2229 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2230 return ret;
2231 }
2232 req.nav_id = nav_id;
2233 req.src_thread = src_thread;
2234 req.dst_thread = dst_thread;
2235
2236 ret = ti_sci_do_xfer(info, xfer);
2237 if (ret) {
2238 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2239 goto fail;
2240 }
2241
2242 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2243 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2244
2245fail:
2246 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2247 src_thread, dst_thread, ret);
2248 return ret;
2249}
2250
2251static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2252 const struct ti_sci_handle *handle,
2253 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2254{
2255 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2256 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2257 struct ti_sci_xfer *xfer;
2258 struct ti_sci_info *info;
2259 int ret = 0;
2260
2261 if (IS_ERR(handle))
2262 return PTR_ERR(handle);
2263 if (!handle)
2264 return -EINVAL;
2265
2266 info = handle_to_ti_sci_info(handle);
2267
2268 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2269 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2270 (u32 *)&req, sizeof(req), sizeof(*resp));
2271 if (IS_ERR(xfer)) {
2272 ret = PTR_ERR(xfer);
2273 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2274 return ret;
2275 }
2276 req.valid_params = params->valid_params;
2277 req.nav_id = params->nav_id;
2278 req.index = params->index;
2279 req.tx_pause_on_err = params->tx_pause_on_err;
2280 req.tx_filt_einfo = params->tx_filt_einfo;
2281 req.tx_filt_pswords = params->tx_filt_pswords;
2282 req.tx_atype = params->tx_atype;
2283 req.tx_chan_type = params->tx_chan_type;
2284 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2285 req.tx_fetch_size = params->tx_fetch_size;
2286 req.tx_credit_count = params->tx_credit_count;
2287 req.txcq_qnum = params->txcq_qnum;
2288 req.tx_priority = params->tx_priority;
2289 req.tx_qos = params->tx_qos;
2290 req.tx_orderid = params->tx_orderid;
2291 req.fdepth = params->fdepth;
2292 req.tx_sched_priority = params->tx_sched_priority;
2293
2294 ret = ti_sci_do_xfer(info, xfer);
2295 if (ret) {
2296 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2297 goto fail;
2298 }
2299
2300 resp =
2301 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2302 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2303
2304fail:
2305 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2306 return ret;
2307}
2308
2309static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2310 const struct ti_sci_handle *handle,
2311 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2312{
2313 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2314 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2315 struct ti_sci_xfer *xfer;
2316 struct ti_sci_info *info;
2317 int ret = 0;
2318
2319 if (IS_ERR(handle))
2320 return PTR_ERR(handle);
2321 if (!handle)
2322 return -EINVAL;
2323
2324 info = handle_to_ti_sci_info(handle);
2325
2326 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2327 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2328 (u32 *)&req, sizeof(req), sizeof(*resp));
2329 if (IS_ERR(xfer)) {
2330 ret = PTR_ERR(xfer);
2331 dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2332 return ret;
2333 }
2334
2335 req.valid_params = params->valid_params;
2336 req.nav_id = params->nav_id;
2337 req.index = params->index;
2338 req.rx_fetch_size = params->rx_fetch_size;
2339 req.rxcq_qnum = params->rxcq_qnum;
2340 req.rx_priority = params->rx_priority;
2341 req.rx_qos = params->rx_qos;
2342 req.rx_orderid = params->rx_orderid;
2343 req.rx_sched_priority = params->rx_sched_priority;
2344 req.flowid_start = params->flowid_start;
2345 req.flowid_cnt = params->flowid_cnt;
2346 req.rx_pause_on_err = params->rx_pause_on_err;
2347 req.rx_atype = params->rx_atype;
2348 req.rx_chan_type = params->rx_chan_type;
2349 req.rx_ignore_short = params->rx_ignore_short;
2350 req.rx_ignore_long = params->rx_ignore_long;
2351
2352 ret = ti_sci_do_xfer(info, xfer);
2353 if (ret) {
2354 dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2355 goto fail;
2356 }
2357
2358 resp =
2359 (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2360 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2361
2362fail:
2363 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2364 return ret;
2365}
2366
2367static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2368 const struct ti_sci_handle *handle,
2369 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2370{
2371 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2372 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2373 struct ti_sci_xfer *xfer;
2374 struct ti_sci_info *info;
2375 int ret = 0;
2376
2377 if (IS_ERR(handle))
2378 return PTR_ERR(handle);
2379 if (!handle)
2380 return -EINVAL;
2381
2382 info = handle_to_ti_sci_info(handle);
2383
2384 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2385 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2386 (u32 *)&req, sizeof(req), sizeof(*resp));
2387 if (IS_ERR(xfer)) {
2388 ret = PTR_ERR(xfer);
2389 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2390 return ret;
2391 }
2392
2393 req.valid_params = params->valid_params;
2394 req.nav_id = params->nav_id;
2395 req.flow_index = params->flow_index;
2396 req.rx_einfo_present = params->rx_einfo_present;
2397 req.rx_psinfo_present = params->rx_psinfo_present;
2398 req.rx_error_handling = params->rx_error_handling;
2399 req.rx_desc_type = params->rx_desc_type;
2400 req.rx_sop_offset = params->rx_sop_offset;
2401 req.rx_dest_qnum = params->rx_dest_qnum;
2402 req.rx_src_tag_hi = params->rx_src_tag_hi;
2403 req.rx_src_tag_lo = params->rx_src_tag_lo;
2404 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2405 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2406 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2407 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2408 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2409 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2410 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2411 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2412 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2413 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2414 req.rx_ps_location = params->rx_ps_location;
2415
2416 ret = ti_sci_do_xfer(info, xfer);
2417 if (ret) {
2418 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2419 goto fail;
2420 }
2421
2422 resp =
2423 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2424 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2425
2426fail:
2427 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2428 return ret;
2429}
2430
/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info: pointer to TISCI pointer
 *
 * Wires every ti_sci_cmd_* implementation in this file into the function
 * pointer tables of info->handle.ops, grouped per subsystem (board config,
 * device, clock, core, resource management, processor, ring accelerator,
 * PSI-L and UDMAP). Called once from probe, before the handle is handed
 * out to clients.
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_board_ops *bops = &ops->board_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;

	/* Board configuration message handlers */
	bops->board_config = ti_sci_cmd_set_board_config;
	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
	bops->board_config_security = ti_sci_cmd_set_board_config_security;
	bops->board_config_pm = ti_sci_cmd_set_board_config_pm;

	/* Device power/state management */
	dops->get_device = ti_sci_cmd_get_device;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	/* Clock management */
	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	/* Core (SoC-level) operations */
	core_ops->reboot_device = ti_sci_cmd_core_reboot;
	core_ops->query_msmc = ti_sci_cmd_query_msmc;

	/* Resource management core (resource range queries) */
	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	/* Remote processor control */
	pops->proc_request = ti_sci_cmd_proc_request;
	pops->proc_release = ti_sci_cmd_proc_release;
	pops->proc_handover = ti_sci_cmd_proc_handover;
	pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;

	/* Navigator: ring accelerator, PSI-L pairing, UDMAP channels/flows */
	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
}
2505
2506/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302507 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2508 * @dev: Pointer to the SYSFW device
2509 *
2510 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2511 * are encountered.
2512 */
2513const
2514struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2515{
2516 if (!sci_dev)
2517 return ERR_PTR(-EINVAL);
2518
2519 struct ti_sci_info *info = dev_get_priv(sci_dev);
2520
2521 if (!info)
2522 return ERR_PTR(-EINVAL);
2523
2524 struct ti_sci_handle *handle = &info->handle;
2525
2526 if (!handle)
2527 return ERR_PTR(-EINVAL);
2528
2529 return handle;
2530}
2531
2532/**
2533 * ti_sci_get_handle() - Get the TI SCI handle for a device
2534 * @dev: Pointer to device for which we want SCI handle
2535 *
2536 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2537 * are encountered.
2538 */
2539const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2540{
2541 if (!dev)
2542 return ERR_PTR(-EINVAL);
2543
2544 struct udevice *sci_dev = dev_get_parent(dev);
2545
2546 return ti_sci_get_handle_from_sysfw(sci_dev);
2547}
2548
2549/**
2550 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2551 * @dev: device node
2552 * @propname: property name containing phandle on TISCI node
2553 *
2554 * Return: pointer to handle if successful, else appropriate error value.
2555 */
2556const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2557 const char *property)
2558{
2559 struct ti_sci_info *entry, *info = NULL;
2560 u32 phandle, err;
2561 ofnode node;
2562
2563 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2564 if (err)
2565 return ERR_PTR(err);
2566
2567 node = ofnode_get_by_phandle(phandle);
2568 if (!ofnode_valid(node))
2569 return ERR_PTR(-EINVAL);
2570
2571 list_for_each_entry(entry, &ti_sci_list, list)
2572 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2573 info = entry;
2574 break;
2575 }
2576
2577 if (!info)
2578 return ERR_PTR(-ENODEV);
2579
2580 return &info->handle;
2581}
2582
2583/**
2584 * ti_sci_of_to_info() - generate private data from device tree
2585 * @dev: corresponding system controller interface device
2586 * @info: pointer to driver specific private data
2587 *
2588 * Return: 0 if all goes good, else appropriate error message.
2589 */
2590static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2591{
2592 int ret;
2593
2594 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2595 if (ret) {
2596 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2597 __func__, ret);
2598 return ret;
2599 }
2600
2601 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2602 if (ret) {
2603 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2604 __func__, ret);
2605 return ret;
2606 }
2607
2608 /* Notify channel is optional. Enable only if populated */
2609 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2610 if (ret) {
2611 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2612 __func__, ret);
2613 }
2614
2615 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302616 info->desc->default_host_id);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302617
2618 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2619
2620 return 0;
2621}
2622
2623/**
2624 * ti_sci_probe() - Basic probe
2625 * @dev: corresponding system controller interface device
2626 *
2627 * Return: 0 if all goes good, else appropriate error message.
2628 */
2629static int ti_sci_probe(struct udevice *dev)
2630{
2631 struct ti_sci_info *info;
2632 int ret;
2633
2634 debug("%s(dev=%p)\n", __func__, dev);
2635
2636 info = dev_get_priv(dev);
2637 info->desc = (void *)dev_get_driver_data(dev);
2638
2639 ret = ti_sci_of_to_info(dev, info);
2640 if (ret) {
2641 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2642 return ret;
2643 }
2644
2645 info->dev = dev;
2646 info->seq = 0xA;
2647
2648 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302649 ti_sci_setup_ops(info);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302650
2651 ret = ti_sci_cmd_get_revision(&info->handle);
2652
2653 return ret;
2654}
2655
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302656/*
2657 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2658 * @res: Pointer to the TISCI resource
2659 *
2660 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2661 */
2662u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2663{
2664 u16 set, free_bit;
2665
2666 for (set = 0; set < res->sets; set++) {
2667 free_bit = find_first_zero_bit(res->desc[set].res_map,
2668 res->desc[set].num);
2669 if (free_bit != res->desc[set].num) {
2670 set_bit(free_bit, res->desc[set].res_map);
2671 return res->desc[set].start + free_bit;
2672 }
2673 }
2674
2675 return TI_SCI_RESOURCE_NULL;
2676}
2677
2678/**
2679 * ti_sci_release_resource() - Release a resource from TISCI resource.
2680 * @res: Pointer to the TISCI resource
2681 */
2682void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2683{
2684 u16 set;
2685
2686 for (set = 0; set < res->sets; set++) {
2687 if (res->desc[set].start <= id &&
2688 (res->desc[set].num + res->desc[set].start) > id)
2689 clear_bit(id - res->desc[set].start,
2690 res->desc[set].res_map);
2691 }
2692}
2693
2694/**
2695 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2696 * @handle: TISCI handle
2697 * @dev: Device pointer to which the resource is assigned
2698 * @of_prop: property name by which the resource are represented
2699 *
2700 * Note: This function expects of_prop to be in the form of tuples
2701 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2702 * for each of_prop. Client driver can directly call
2703 * ti_sci_(get_free, release)_resource apis for handling the resource.
2704 *
2705 * Return: Pointer to ti_sci_resource if all went well else appropriate
2706 * error pointer.
2707 */
2708struct ti_sci_resource *
2709devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2710 struct udevice *dev, u32 dev_id, char *of_prop)
2711{
2712 u32 resource_subtype;
2713 u16 resource_type;
2714 struct ti_sci_resource *res;
2715 int sets, i, ret;
2716 u32 *temp;
2717
2718 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2719 if (!res)
2720 return ERR_PTR(-ENOMEM);
2721
2722 sets = dev_read_size(dev, of_prop);
2723 if (sets < 0) {
2724 dev_err(dev, "%s resource type ids not available\n", of_prop);
2725 return ERR_PTR(sets);
2726 }
2727 temp = malloc(sets);
2728 sets /= sizeof(u32);
2729 res->sets = sets;
2730
2731 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2732 GFP_KERNEL);
2733 if (!res->desc)
2734 return ERR_PTR(-ENOMEM);
2735
2736 ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
2737 &resource_type);
2738 if (ret) {
2739 dev_err(dev, "No valid resource type for %u\n", dev_id);
2740 return ERR_PTR(-EINVAL);
2741 }
2742
2743 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2744 if (ret)
2745 return ERR_PTR(-EINVAL);
2746
2747 for (i = 0; i < res->sets; i++) {
2748 resource_subtype = temp[i];
2749 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2750 resource_subtype,
2751 &res->desc[i].start,
2752 &res->desc[i].num);
2753 if (ret) {
2754 dev_err(dev, "type %d subtype %d not allocated for host %d\n",
2755 resource_type, resource_subtype,
2756 handle_to_ti_sci_info(handle)->host_id);
2757 return ERR_PTR(ret);
2758 }
2759
2760 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
2761 resource_type, resource_subtype, res->desc[i].start,
2762 res->desc[i].num);
2763
2764 res->desc[i].res_map =
2765 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2766 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2767 if (!res->desc[i].res_map)
2768 return ERR_PTR(-ENOMEM);
2769 }
2770
2771 return res;
2772}
2773
/* Description for K2G: transport limits of the 66AK2G PMMC firmware */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	/* Default TISCI host identifier when DT omits "ti,host-id" */
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	/* K2G uses dev_id directly as RM type; no translation table needed */
	.rm_type_map = NULL,
};
2784
/*
 * AM654 work-around table translating TISCI device IDs to the RM "type"
 * values expected by the resource-range APIs (see struct
 * ti_sci_rm_type_map). Terminated by a dev_id == 0 entry.
 */
static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};
2794
/* Description for AM654: transport limits of the AM65x SYSFW */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	/* Default TISCI host identifier when DT omits "ti,host-id" */
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 60,
	/* dev_id -> RM type translation needed on AM654; see table above */
	.rm_type_map = ti_sci_am654_rm_type_map,
};
2805
/* Device-tree match table: each compatible carries its SoC description */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
2817
/* U-Boot driver-model registration for the TI SCI firmware driver */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* Driver model auto-allocates struct ti_sci_info as priv data */
	.priv_auto_alloc_size = sizeof(struct ti_sci_info),
};