blob: 687acbf2b4128667e2e1b39b3f1d9b4b0f3f1909 [file] [log] [blame]
Lokesh Vutla32cd2512018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060013#include <log.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053014#include <mailbox.h>
Simon Glass336d4612020-02-03 07:36:16 -070015#include <malloc.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053016#include <dm/device.h>
Simon Glass336d4612020-02-03 07:36:16 -070017#include <dm/device_compat.h>
Simon Glass61b29b82020-02-03 07:36:15 -070018#include <dm/devres.h>
Simon Glasscd93d622020-05-10 11:40:13 -060019#include <linux/bitops.h>
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053020#include <linux/compat.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053021#include <linux/err.h>
22#include <linux/soc/ti/k3-sec-proxy.h>
23#include <linux/soc/ti/ti_sci_protocol.h>
24
25#include "ti_sci.h"
Vignesh Raghavendra0e811582021-06-07 19:47:48 +053026#include "ti_sci_static_data.h"
Lokesh Vutla32cd2512018-08-27 15:57:32 +053027
/*
 * List of all TI SCI devices active in system.
 * NOTE(review): entries appear to be struct ti_sci_info instances linked
 * via their @list member — registration happens outside this chunk; confirm.
 */
static LIST_HEAD(ti_sci_list);
30
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 *
 * A single instance is embedded in struct ti_sci_info and reused for
 * every transaction (see ti_sci_setup_one_xfer()).
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
40
41/**
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053042 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
43 * management representation of dev_ids.
44 * @dev_id: TISCI device ID
45 * @type: Corresponding id as identified by TISCI RM.
46 *
47 * Note: This is used only as a work around for using RM range apis
48 * for AM654 SoC. For future SoCs dev_id will be used as type
49 * for RM range APIs. In order to maintain ABI backward compatibility
50 * type is not being changed for AM654 SoC.
51 */
52struct ti_sci_rm_type_map {
53 u32 dev_id;
54 u16 type;
55};
56
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds);
 *			converted to microseconds (* 1000) when passed to
 *			mbox_recv() in ti_sci_get_response()
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
71
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel (no user visible in this
 *		chunk of the file)
 * @xfer:	xfer info; single buffer reused for every transaction
 * @list:	list head
 * @dev_list:	List of exclusively requested devices
 *		(struct ti_sci_exclusive_dev entries)
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
99
/**
 * struct ti_sci_exclusive_dev - Device exclusively requested by this host
 * @id:		TISCI device identifier
 * @count:	Number of outstanding exclusive requests for this device
 * @list:	Link into a struct ti_sci_info dev_list
 */
struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};
105
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530106#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
107
108/**
109 * ti_sci_setup_one_xfer() - Setup one message type
110 * @info: Pointer to SCI entity information
111 * @msg_type: Message type
112 * @msg_flags: Flag to set for the message
113 * @buf: Buffer to be send to mailbox channel
114 * @tx_message_size: transmit message size
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530115 * @rx_message_size: receive message size. may be set to zero for send-only
116 * transactions.
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530117 *
118 * Helper function which is used by various command functions that are
119 * exposed to clients of this driver for allocating a message traffic event.
120 *
121 * Return: Corresponding ti_sci_xfer pointer if all went fine,
122 * else appropriate error pointer.
123 */
124static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
125 u16 msg_type, u32 msg_flags,
126 u32 *buf,
127 size_t tx_message_size,
128 size_t rx_message_size)
129{
130 struct ti_sci_xfer *xfer = &info->xfer;
131 struct ti_sci_msg_hdr *hdr;
132
133 /* Ensure we have sane transfer sizes */
134 if (rx_message_size > info->desc->max_msg_size ||
135 tx_message_size > info->desc->max_msg_size ||
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530136 (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
Andrew Davis0d74f262022-07-25 20:25:04 -0500137 tx_message_size < sizeof(*hdr)) {
138 dev_err(info->dev, "TI-SCI message transfer size not sane\n");
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530139 return ERR_PTR(-ERANGE);
Andrew Davis0d74f262022-07-25 20:25:04 -0500140 }
141
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530142
143 info->seq = ~info->seq;
144 xfer->tx_message.buf = buf;
145 xfer->tx_message.len = tx_message_size;
146 xfer->rx_len = (u8)rx_message_size;
147
148 hdr = (struct ti_sci_msg_hdr *)buf;
149 hdr->seq = info->seq;
150 hdr->type = msg_type;
151 hdr->host = info->host_id;
152 hdr->flags = msg_flags;
153
154 return xfer;
155}
156
/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_get_response(struct ti_sci_info *info,
				      struct ti_sci_xfer *xfer,
				      struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response; timeout is ms converted to us */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		/* Advance past the secure header to reach the TISCI header */
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		/*
		 * NOTE(review): ret is 0 at this point, so an unexpected
		 * sequence id is reported as success to the caller —
		 * confirm this is intentional rather than a missing
		 * error return.
		 */
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	/* A short response is logged but deliberately not treated as fatal */
	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}
213
214/**
215 * ti_sci_do_xfer() - Do one transfer
216 * @info: Pointer to SCI entity information
217 * @xfer: Transfer to initiate and wait for response
218 *
219 * Return: 0 if all went fine, else return appropriate error.
220 */
221static inline int ti_sci_do_xfer(struct ti_sci_info *info,
222 struct ti_sci_xfer *xfer)
223{
224 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
225 u8 secure_buf[info->desc->max_msg_size];
226 struct ti_sci_secure_msg_hdr secure_hdr;
227 int ret;
228
229 if (info->is_secure) {
230 /* ToDo: get checksum of the entire message */
231 secure_hdr.checksum = 0;
232 secure_hdr.reserved = 0;
233 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
234 xfer->tx_message.len);
235
236 xfer->tx_message.buf = (u32 *)secure_buf;
237 xfer->tx_message.len += sizeof(secure_hdr);
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530238
239 if (xfer->rx_len)
240 xfer->rx_len += sizeof(secure_hdr);
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530241 }
242
243 /* Send the message */
244 ret = mbox_send(&info->chan_tx, msg);
245 if (ret) {
246 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
247 __func__, ret);
248 return ret;
249 }
250
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530251 /* Get response if requested */
252 if (xfer->rx_len)
253 ret = ti_sci_get_response(info, xfer, &info->chan_rx);
254
255 return ret;
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530256}
257
258/**
259 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
260 * @handle: pointer to TI SCI handle
261 *
262 * Updates the SCI information in the internal data structure.
263 *
264 * Return: 0 if all went fine, else return appropriate error.
265 */
266static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
267{
268 struct ti_sci_msg_resp_version *rev_info;
269 struct ti_sci_version_info *ver;
270 struct ti_sci_msg_hdr hdr;
271 struct ti_sci_info *info;
272 struct ti_sci_xfer *xfer;
273 int ret;
274
275 if (IS_ERR(handle))
276 return PTR_ERR(handle);
277 if (!handle)
278 return -EINVAL;
279
280 info = handle_to_ti_sci_info(handle);
281
Andrew F. Davisefbfd442019-04-29 09:04:11 -0400282 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
283 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530284 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
285 sizeof(*rev_info));
286 if (IS_ERR(xfer)) {
287 ret = PTR_ERR(xfer);
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530288 return ret;
289 }
290
291 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -0500292 if (ret)
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530293 return ret;
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530294
295 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
296
297 ver = &handle->version;
298 ver->abi_major = rev_info->abi_major;
299 ver->abi_minor = rev_info->abi_minor;
300 ver->firmware_revision = rev_info->firmware_revision;
301 strncpy(ver->firmware_description, rev_info->firmware_description,
302 sizeof(ver->firmware_description));
303
304 return 0;
305}
306
307/**
308 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
309 * @r: pointer to response buffer
310 *
311 * Return: true if the response was an ACK, else returns false.
312 */
313static inline bool ti_sci_is_response_ack(void *r)
314{
315 struct ti_sci_msg_hdr *hdr = r;
316
317 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
318}
319
/**
 * cmd_set_board_config_using_msg() - Common command to send board configuration
 *				      message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
					  u16 msg_type, u64 addr, u32 size)
{
	struct ti_sci_msg_board_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* setup_one_xfer() writes the common TISCI header into @req */
	xfer = ti_sci_setup_one_xfer(info, msg_type,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	/* Split the 64-bit config address into two 32-bit fields */
	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
	req.boardcfgp_low = addr & 0xffffffff;
	req.boardcfg_size = size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
368
369/**
370 * ti_sci_cmd_set_board_config() - Command to send board configuration message
371 * @handle: pointer to TI SCI handle
372 * @addr: Address where the board config structure is located
373 * @size: Size of the board config structure
374 *
375 * Return: 0 if all went well, else returns appropriate error value.
376 */
377static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
378 u64 addr, u32 size)
379{
380 return cmd_set_board_config_using_msg(handle,
381 TI_SCI_MSG_BOARD_CONFIG,
382 addr, size);
383}
384
385/**
386 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
387 * management configuration
388 * @handle: pointer to TI SCI handle
389 * @addr: Address where the board RM config structure is located
390 * @size: Size of the RM config structure
391 *
392 * Return: 0 if all went well, else returns appropriate error value.
393 */
394static
395int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
396 u64 addr, u32 size)
397{
398 return cmd_set_board_config_using_msg(handle,
399 TI_SCI_MSG_BOARD_CONFIG_RM,
400 addr, size);
401}
402
403/**
404 * ti_sci_cmd_set_board_config_security() - Command to send board security
405 * configuration message
406 * @handle: pointer to TI SCI handle
407 * @addr: Address where the board security config structure is located
408 * @size: Size of the security config structure
409 *
410 * Return: 0 if all went well, else returns appropriate error value.
411 */
412static
413int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
414 u64 addr, u32 size)
415{
416 return cmd_set_board_config_using_msg(handle,
417 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
418 addr, size);
419}
420
421/**
422 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
423 * configuration message
424 * @handle: pointer to TI SCI handle
425 * @addr: Address where the board PM config structure is located
426 * @size: Size of the PM config structure
427 *
428 * Return: 0 if all went well, else returns appropriate error value.
429 */
430static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
431 u64 addr, u32 size)
432{
433 return cmd_set_board_config_using_msg(handle,
434 TI_SCI_MSG_BOARD_CONFIG_PM,
435 addr, size);
436}
437
Lokesh Vutla9566b772019-06-07 19:24:41 +0530438static struct ti_sci_exclusive_dev
439*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
440{
441 struct ti_sci_exclusive_dev *dev;
442
443 list_for_each_entry(dev, dev_list, list)
444 if (dev->id == id)
445 return dev;
446
447 return NULL;
448}
449
450static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
451{
452 struct ti_sci_exclusive_dev *dev;
453
454 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
455 if (dev) {
456 dev->count++;
457 return;
458 }
459
460 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
461 dev->id = id;
462 dev->count = 1;
463 INIT_LIST_HEAD(&dev->list);
464 list_add_tail(&dev->list, &info->dev_list);
465}
466
467static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
468{
469 struct ti_sci_exclusive_dev *dev;
470
471 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
472 if (!dev)
473 return;
474
475 if (dev->count > 0)
476 dev->count--;
477}
478
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* setup_one_xfer() writes the common TISCI header into @req */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	/*
	 * Only after a firmware ACK: maintain the exclusive-device
	 * bookkeeping used by ti_sci_cmd_release_exclusive_devices().
	 */
	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
		ti_sci_delete_exclusive_dev(info, id);
	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
		ti_sci_add_exclusive_dev(info, id);

	return ret;
}
530
531/**
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530532 * ti_sci_set_device_state_no_wait() - Set device state helper without
533 * requesting or waiting for a response.
534 * @handle: pointer to TI SCI handle
535 * @id: Device identifier
536 * @flags: flags to setup for the device
537 * @state: State to move the device to
538 *
539 * Return: 0 if all went well, else returns appropriate error value.
540 */
541static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
542 u32 id, u32 flags, u8 state)
543{
544 struct ti_sci_msg_req_set_device_state req;
545 struct ti_sci_info *info;
546 struct ti_sci_xfer *xfer;
547 int ret = 0;
548
549 if (IS_ERR(handle))
550 return PTR_ERR(handle);
551 if (!handle)
552 return -EINVAL;
553
554 info = handle_to_ti_sci_info(handle);
555
556 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
557 flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
558 (u32 *)&req, sizeof(req), 0);
559 if (IS_ERR(xfer)) {
560 ret = PTR_ERR(xfer);
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530561 return ret;
562 }
563 req.id = id;
564 req.state = state;
565
566 ret = ti_sci_do_xfer(info, xfer);
567 if (ret)
Andrew Davis59178502022-07-25 20:25:03 -0500568 return ret;
Andreas Dannenberg410adcc2019-06-07 19:24:40 +0530569
570 return ret;
571}
572
/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Any of the output pointers may be NULL; at least one must be non-NULL.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	/* Caller must want at least one piece of state back */
	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* setup_one_xfer() writes the common TISCI header into @req */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	/* Copy out only what the caller asked for */
	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

	return ret;
}
632
633/**
634 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
635 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
636 * @id: Device Identifier
637 *
638 * Request for the device - NOTE: the client MUST maintain integrity of
639 * usage count by balancing get_device with put_device. No refcounting is
640 * managed by driver for that purpose.
641 *
642 * NOTE: The request is for exclusive access for the processor.
643 *
644 * Return: 0 if all went fine, else return appropriate error.
645 */
646static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
647{
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530648 return ti_sci_set_device_state(handle, id, 0,
649 MSG_DEVICE_SW_STATE_ON);
650}
651
652static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
653 u32 id)
654{
655 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530656 MSG_DEVICE_SW_STATE_ON);
657}
658
659/**
660 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
661 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
662 * @id: Device Identifier
663 *
664 * Request for the device - NOTE: the client MUST maintain integrity of
665 * usage count by balancing get_device with put_device. No refcounting is
666 * managed by driver for that purpose.
667 *
668 * Return: 0 if all went fine, else return appropriate error.
669 */
670static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
671{
672 return ti_sci_set_device_state(handle, id,
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530673 0,
674 MSG_DEVICE_SW_STATE_RETENTION);
675}
676
677static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
678 u32 id)
679{
680 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530681 MSG_DEVICE_SW_STATE_RETENTION);
682}
683
684/**
685 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
686 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
687 * @id: Device Identifier
688 *
689 * Request for the device - NOTE: the client MUST maintain integrity of
690 * usage count by balancing get_device with put_device. No refcounting is
691 * managed by driver for that purpose.
692 *
693 * Return: 0 if all went fine, else return appropriate error.
694 */
695static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
696{
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +0530697 return ti_sci_set_device_state(handle, id, 0,
698 MSG_DEVICE_SW_STATE_AUTO_OFF);
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530699}
700
Lokesh Vutla9566b772019-06-07 19:24:41 +0530701static
702int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
703{
704 struct ti_sci_exclusive_dev *dev, *tmp;
705 struct ti_sci_info *info;
706 int i, cnt;
707
708 info = handle_to_ti_sci_info(handle);
709
710 list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
711 cnt = dev->count;
712 debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
713 for (i = 0; i < cnt; i++)
714 ti_sci_cmd_put_device(handle, dev->id);
715 }
716
717 return 0;
718}
719
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530720/**
721 * ti_sci_cmd_dev_is_valid() - Is the device valid
722 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
723 * @id: Device Identifier
724 *
725 * Return: 0 if all went fine and the device ID is valid, else return
726 * appropriate error.
727 */
728static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
729{
730 u8 unused;
731
732 /* check the device state which will also tell us if the ID is valid */
733 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
734}
735
736/**
737 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
738 * @handle: Pointer to TISCI handle
739 * @id: Device Identifier
740 * @count: Pointer to Context Loss counter to populate
741 *
742 * Return: 0 if all went fine, else return appropriate error.
743 */
744static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
745 u32 *count)
746{
747 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
748}
749
750/**
751 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
752 * @handle: Pointer to TISCI handle
753 * @id: Device Identifier
754 * @r_state: true if requested to be idle
755 *
756 * Return: 0 if all went fine, else return appropriate error.
757 */
758static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
759 bool *r_state)
760{
761 int ret;
762 u8 state;
763
764 if (!r_state)
765 return -EINVAL;
766
767 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
768 if (ret)
769 return ret;
770
771 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
772
773 return 0;
774}
775
776/**
777 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
778 * @handle: Pointer to TISCI handle
779 * @id: Device Identifier
780 * @r_state: true if requested to be stopped
781 * @curr_state: true if currently stopped.
782 *
783 * Return: 0 if all went fine, else return appropriate error.
784 */
785static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
786 bool *r_state, bool *curr_state)
787{
788 int ret;
789 u8 p_state, c_state;
790
791 if (!r_state && !curr_state)
792 return -EINVAL;
793
794 ret =
795 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
796 if (ret)
797 return ret;
798
799 if (r_state)
800 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
801 if (curr_state)
802 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
803
804 return 0;
805}
806
807/**
808 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
809 * @handle: Pointer to TISCI handle
810 * @id: Device Identifier
811 * @r_state: true if requested to be ON
812 * @curr_state: true if currently ON and active
813 *
814 * Return: 0 if all went fine, else return appropriate error.
815 */
816static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
817 bool *r_state, bool *curr_state)
818{
819 int ret;
820 u8 p_state, c_state;
821
822 if (!r_state && !curr_state)
823 return -EINVAL;
824
825 ret =
826 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
827 if (ret)
828 return ret;
829
830 if (r_state)
831 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
832 if (curr_state)
833 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
834
835 return 0;
836}
837
838/**
839 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
840 * @handle: Pointer to TISCI handle
841 * @id: Device Identifier
842 * @curr_state: true if currently transitioning.
843 *
844 * Return: 0 if all went fine, else return appropriate error.
845 */
846static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
847 bool *curr_state)
848{
849 int ret;
850 u8 state;
851
852 if (!curr_state)
853 return -EINVAL;
854
855 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
856 if (ret)
857 return ret;
858
859 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
860
861 return 0;
862}
863
/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* setup_one_xfer() writes the common TISCI header into @req */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
910
911/**
912 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
913 * by TISCI
914 * @handle: Pointer to TISCI handle
915 * @id: Device Identifier
916 * @reset_state: Pointer to reset state to populate
917 *
918 * Return: 0 if all went fine, else return appropriate error.
919 */
920static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
921 u32 id, u32 *reset_state)
922{
923 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
924 NULL);
925}
926
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* setup_one_xfer() writes the common TISCI header into @req */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
978
979/**
980 * ti_sci_cmd_get_clock_state() - Get clock state helper
981 * @handle: pointer to TI SCI handle
982 * @dev_id: Device identifier this request is for
983 * @clk_id: Clock identifier for the device for this request.
984 * Each device has it's own set of clock inputs. This indexes
985 * which clock input to modify.
986 * @programmed_state: State requested for clock to move to
987 * @current_state: State that the clock is currently in
988 *
989 * Return: 0 if all went well, else returns appropriate error value.
990 */
991static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
992 u32 dev_id, u8 clk_id,
993 u8 *programmed_state, u8 *current_state)
994{
995 struct ti_sci_msg_resp_get_clock_state *resp;
996 struct ti_sci_msg_req_get_clock_state req;
997 struct ti_sci_info *info;
998 struct ti_sci_xfer *xfer;
999 int ret = 0;
1000
1001 if (IS_ERR(handle))
1002 return PTR_ERR(handle);
1003 if (!handle)
1004 return -EINVAL;
1005
1006 if (!programmed_state && !current_state)
1007 return -EINVAL;
1008
1009 info = handle_to_ti_sci_info(handle);
1010
1011 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1012 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1013 (u32 *)&req, sizeof(req), sizeof(*resp));
1014 if (IS_ERR(xfer)) {
1015 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301016 return ret;
1017 }
1018 req.dev_id = dev_id;
1019 req.clk_id = clk_id;
1020
1021 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001022 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301023 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301024
1025 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
1026
1027 if (!ti_sci_is_response_ack(resp))
1028 return -ENODEV;
1029
1030 if (programmed_state)
1031 *programmed_state = resp->programmed_state;
1032 if (current_state)
1033 *current_state = resp->current_state;
1034
1035 return ret;
1036}
1037
1038/**
1039 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1040 * @handle: pointer to TI SCI handle
1041 * @dev_id: Device identifier this request is for
1042 * @clk_id: Clock identifier for the device for this request.
1043 * Each device has it's own set of clock inputs. This indexes
1044 * which clock input to modify.
1045 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1046 * @can_change_freq: 'true' if frequency change is desired, else 'false'
1047 * @enable_input_term: 'true' if input termination is desired, else 'false'
1048 *
1049 * Return: 0 if all went well, else returns appropriate error value.
1050 */
1051static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1052 u8 clk_id, bool needs_ssc, bool can_change_freq,
1053 bool enable_input_term)
1054{
1055 u32 flags = 0;
1056
1057 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1058 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1059 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1060
1061 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1062 MSG_CLOCK_SW_STATE_REQ);
1063}
1064
1065/**
1066 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1067 * @handle: pointer to TI SCI handle
1068 * @dev_id: Device identifier this request is for
1069 * @clk_id: Clock identifier for the device for this request.
1070 * Each device has it's own set of clock inputs. This indexes
1071 * which clock input to modify.
1072 *
1073 * NOTE: This clock must have been requested by get_clock previously.
1074 *
1075 * Return: 0 if all went well, else returns appropriate error value.
1076 */
1077static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1078 u32 dev_id, u8 clk_id)
1079{
1080 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1081 MSG_CLOCK_SW_STATE_UNREQ);
1082}
1083
1084/**
1085 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1086 * @handle: pointer to TI SCI handle
1087 * @dev_id: Device identifier this request is for
1088 * @clk_id: Clock identifier for the device for this request.
1089 * Each device has it's own set of clock inputs. This indexes
1090 * which clock input to modify.
1091 *
1092 * NOTE: This clock must have been requested by get_clock previously.
1093 *
1094 * Return: 0 if all went well, else returns appropriate error value.
1095 */
1096static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1097 u32 dev_id, u8 clk_id)
1098{
1099 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1100 MSG_CLOCK_SW_STATE_AUTO);
1101}
1102
1103/**
1104 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1105 * @handle: pointer to TI SCI handle
1106 * @dev_id: Device identifier this request is for
1107 * @clk_id: Clock identifier for the device for this request.
1108 * Each device has it's own set of clock inputs. This indexes
1109 * which clock input to modify.
1110 * @req_state: state indicating if the clock is auto managed
1111 *
1112 * Return: 0 if all went well, else returns appropriate error value.
1113 */
1114static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1115 u32 dev_id, u8 clk_id, bool *req_state)
1116{
1117 u8 state = 0;
1118 int ret;
1119
1120 if (!req_state)
1121 return -EINVAL;
1122
1123 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1124 if (ret)
1125 return ret;
1126
1127 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1128 return 0;
1129}
1130
1131/**
1132 * ti_sci_cmd_clk_is_on() - Is the clock ON
1133 * @handle: pointer to TI SCI handle
1134 * @dev_id: Device identifier this request is for
1135 * @clk_id: Clock identifier for the device for this request.
1136 * Each device has it's own set of clock inputs. This indexes
1137 * which clock input to modify.
1138 * @req_state: state indicating if the clock is managed by us and enabled
1139 * @curr_state: state indicating if the clock is ready for operation
1140 *
1141 * Return: 0 if all went well, else returns appropriate error value.
1142 */
1143static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1144 u8 clk_id, bool *req_state, bool *curr_state)
1145{
1146 u8 c_state = 0, r_state = 0;
1147 int ret;
1148
1149 if (!req_state && !curr_state)
1150 return -EINVAL;
1151
1152 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1153 &r_state, &c_state);
1154 if (ret)
1155 return ret;
1156
1157 if (req_state)
1158 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1159 if (curr_state)
1160 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1161 return 0;
1162}
1163
1164/**
1165 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1166 * @handle: pointer to TI SCI handle
1167 * @dev_id: Device identifier this request is for
1168 * @clk_id: Clock identifier for the device for this request.
1169 * Each device has it's own set of clock inputs. This indexes
1170 * which clock input to modify.
1171 * @req_state: state indicating if the clock is managed by us and disabled
1172 * @curr_state: state indicating if the clock is NOT ready for operation
1173 *
1174 * Return: 0 if all went well, else returns appropriate error value.
1175 */
1176static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1177 u8 clk_id, bool *req_state, bool *curr_state)
1178{
1179 u8 c_state = 0, r_state = 0;
1180 int ret;
1181
1182 if (!req_state && !curr_state)
1183 return -EINVAL;
1184
1185 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1186 &r_state, &c_state);
1187 if (ret)
1188 return ret;
1189
1190 if (req_state)
1191 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1192 if (curr_state)
1193 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1194 return 0;
1195}
1196
1197/**
1198 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1199 * @handle: pointer to TI SCI handle
1200 * @dev_id: Device identifier this request is for
1201 * @clk_id: Clock identifier for the device for this request.
1202 * Each device has it's own set of clock inputs. This indexes
1203 * which clock input to modify.
1204 * @parent_id: Parent clock identifier to set
1205 *
1206 * Return: 0 if all went well, else returns appropriate error value.
1207 */
1208static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1209 u32 dev_id, u8 clk_id, u8 parent_id)
1210{
1211 struct ti_sci_msg_req_set_clock_parent req;
1212 struct ti_sci_msg_hdr *resp;
1213 struct ti_sci_info *info;
1214 struct ti_sci_xfer *xfer;
1215 int ret = 0;
1216
1217 if (IS_ERR(handle))
1218 return PTR_ERR(handle);
1219 if (!handle)
1220 return -EINVAL;
1221
1222 info = handle_to_ti_sci_info(handle);
1223
1224 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1225 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1226 (u32 *)&req, sizeof(req), sizeof(*resp));
1227 if (IS_ERR(xfer)) {
1228 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301229 return ret;
1230 }
1231 req.dev_id = dev_id;
1232 req.clk_id = clk_id;
1233 req.parent_id = parent_id;
1234
1235 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001236 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301237 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301238
1239 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1240
1241 if (!ti_sci_is_response_ack(resp))
1242 return -ENODEV;
1243
1244 return ret;
1245}
1246
1247/**
1248 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1249 * @handle: pointer to TI SCI handle
1250 * @dev_id: Device identifier this request is for
1251 * @clk_id: Clock identifier for the device for this request.
1252 * Each device has it's own set of clock inputs. This indexes
1253 * which clock input to modify.
1254 * @parent_id: Current clock parent
1255 *
1256 * Return: 0 if all went well, else returns appropriate error value.
1257 */
1258static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1259 u32 dev_id, u8 clk_id, u8 *parent_id)
1260{
1261 struct ti_sci_msg_resp_get_clock_parent *resp;
1262 struct ti_sci_msg_req_get_clock_parent req;
1263 struct ti_sci_info *info;
1264 struct ti_sci_xfer *xfer;
1265 int ret = 0;
1266
1267 if (IS_ERR(handle))
1268 return PTR_ERR(handle);
1269 if (!handle || !parent_id)
1270 return -EINVAL;
1271
1272 info = handle_to_ti_sci_info(handle);
1273
1274 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1275 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1276 (u32 *)&req, sizeof(req), sizeof(*resp));
1277 if (IS_ERR(xfer)) {
1278 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301279 return ret;
1280 }
1281 req.dev_id = dev_id;
1282 req.clk_id = clk_id;
1283
1284 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001285 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301286 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301287
1288 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1289
1290 if (!ti_sci_is_response_ack(resp))
1291 ret = -ENODEV;
1292 else
1293 *parent_id = resp->parent_id;
1294
1295 return ret;
1296}
1297
1298/**
1299 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1300 * @handle: pointer to TI SCI handle
1301 * @dev_id: Device identifier this request is for
1302 * @clk_id: Clock identifier for the device for this request.
1303 * Each device has it's own set of clock inputs. This indexes
1304 * which clock input to modify.
1305 * @num_parents: Returns he number of parents to the current clock.
1306 *
1307 * Return: 0 if all went well, else returns appropriate error value.
1308 */
1309static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1310 u32 dev_id, u8 clk_id,
1311 u8 *num_parents)
1312{
1313 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1314 struct ti_sci_msg_req_get_clock_num_parents req;
1315 struct ti_sci_info *info;
1316 struct ti_sci_xfer *xfer;
1317 int ret = 0;
1318
1319 if (IS_ERR(handle))
1320 return PTR_ERR(handle);
1321 if (!handle || !num_parents)
1322 return -EINVAL;
1323
1324 info = handle_to_ti_sci_info(handle);
1325
1326 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1327 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1328 (u32 *)&req, sizeof(req), sizeof(*resp));
1329 if (IS_ERR(xfer)) {
1330 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301331 return ret;
1332 }
1333 req.dev_id = dev_id;
1334 req.clk_id = clk_id;
1335
1336 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001337 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301338 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301339
1340 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1341 xfer->tx_message.buf;
1342
1343 if (!ti_sci_is_response_ack(resp))
1344 ret = -ENODEV;
1345 else
1346 *num_parents = resp->num_parents;
1347
1348 return ret;
1349}
1350
1351/**
1352 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1353 * @handle: pointer to TI SCI handle
1354 * @dev_id: Device identifier this request is for
1355 * @clk_id: Clock identifier for the device for this request.
1356 * Each device has it's own set of clock inputs. This indexes
1357 * which clock input to modify.
1358 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1359 * allowable programmed frequency and does not account for clock
1360 * tolerances and jitter.
1361 * @target_freq: The target clock frequency in Hz. A frequency will be
1362 * processed as close to this target frequency as possible.
1363 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1364 * allowable programmed frequency and does not account for clock
1365 * tolerances and jitter.
1366 * @match_freq: Frequency match in Hz response.
1367 *
1368 * Return: 0 if all went well, else returns appropriate error value.
1369 */
1370static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1371 u32 dev_id, u8 clk_id, u64 min_freq,
1372 u64 target_freq, u64 max_freq,
1373 u64 *match_freq)
1374{
1375 struct ti_sci_msg_resp_query_clock_freq *resp;
1376 struct ti_sci_msg_req_query_clock_freq req;
1377 struct ti_sci_info *info;
1378 struct ti_sci_xfer *xfer;
1379 int ret = 0;
1380
1381 if (IS_ERR(handle))
1382 return PTR_ERR(handle);
1383 if (!handle || !match_freq)
1384 return -EINVAL;
1385
1386 info = handle_to_ti_sci_info(handle);
1387
1388 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1389 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1390 (u32 *)&req, sizeof(req), sizeof(*resp));
1391 if (IS_ERR(xfer)) {
1392 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301393 return ret;
1394 }
1395 req.dev_id = dev_id;
1396 req.clk_id = clk_id;
1397 req.min_freq_hz = min_freq;
1398 req.target_freq_hz = target_freq;
1399 req.max_freq_hz = max_freq;
1400
1401 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001402 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301403 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301404
1405 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1406
1407 if (!ti_sci_is_response_ack(resp))
1408 ret = -ENODEV;
1409 else
1410 *match_freq = resp->freq_hz;
1411
1412 return ret;
1413}
1414
1415/**
1416 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1417 * @handle: pointer to TI SCI handle
1418 * @dev_id: Device identifier this request is for
1419 * @clk_id: Clock identifier for the device for this request.
1420 * Each device has it's own set of clock inputs. This indexes
1421 * which clock input to modify.
1422 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1423 * allowable programmed frequency and does not account for clock
1424 * tolerances and jitter.
1425 * @target_freq: The target clock frequency in Hz. A frequency will be
1426 * processed as close to this target frequency as possible.
1427 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1428 * allowable programmed frequency and does not account for clock
1429 * tolerances and jitter.
1430 *
1431 * Return: 0 if all went well, else returns appropriate error value.
1432 */
1433static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1434 u32 dev_id, u8 clk_id, u64 min_freq,
1435 u64 target_freq, u64 max_freq)
1436{
1437 struct ti_sci_msg_req_set_clock_freq req;
1438 struct ti_sci_msg_hdr *resp;
1439 struct ti_sci_info *info;
1440 struct ti_sci_xfer *xfer;
1441 int ret = 0;
1442
1443 if (IS_ERR(handle))
1444 return PTR_ERR(handle);
1445 if (!handle)
1446 return -EINVAL;
1447
1448 info = handle_to_ti_sci_info(handle);
1449
1450 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1451 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1452 (u32 *)&req, sizeof(req), sizeof(*resp));
1453 if (IS_ERR(xfer)) {
1454 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301455 return ret;
1456 }
1457 req.dev_id = dev_id;
1458 req.clk_id = clk_id;
1459 req.min_freq_hz = min_freq;
1460 req.target_freq_hz = target_freq;
1461 req.max_freq_hz = max_freq;
1462
1463 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001464 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301465 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301466
1467 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1468
1469 if (!ti_sci_is_response_ack(resp))
1470 return -ENODEV;
1471
1472 return ret;
1473}
1474
1475/**
1476 * ti_sci_cmd_clk_get_freq() - Get current frequency
1477 * @handle: pointer to TI SCI handle
1478 * @dev_id: Device identifier this request is for
1479 * @clk_id: Clock identifier for the device for this request.
1480 * Each device has it's own set of clock inputs. This indexes
1481 * which clock input to modify.
1482 * @freq: Currently frequency in Hz
1483 *
1484 * Return: 0 if all went well, else returns appropriate error value.
1485 */
1486static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1487 u32 dev_id, u8 clk_id, u64 *freq)
1488{
1489 struct ti_sci_msg_resp_get_clock_freq *resp;
1490 struct ti_sci_msg_req_get_clock_freq req;
1491 struct ti_sci_info *info;
1492 struct ti_sci_xfer *xfer;
1493 int ret = 0;
1494
1495 if (IS_ERR(handle))
1496 return PTR_ERR(handle);
1497 if (!handle || !freq)
1498 return -EINVAL;
1499
1500 info = handle_to_ti_sci_info(handle);
1501
1502 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1503 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1504 (u32 *)&req, sizeof(req), sizeof(*resp));
1505 if (IS_ERR(xfer)) {
1506 ret = PTR_ERR(xfer);
Lokesh Vutla9b871812018-08-27 15:57:35 +05301507 return ret;
1508 }
1509 req.dev_id = dev_id;
1510 req.clk_id = clk_id;
1511
1512 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001513 if (ret)
Lokesh Vutla9b871812018-08-27 15:57:35 +05301514 return ret;
Lokesh Vutla9b871812018-08-27 15:57:35 +05301515
1516 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1517
1518 if (!ti_sci_is_response_ack(resp))
1519 ret = -ENODEV;
1520 else
1521 *freq = resp->freq_hz;
1522
1523 return ret;
1524}
1525
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301526/**
1527 * ti_sci_cmd_core_reboot() - Command to request system reset
1528 * @handle: pointer to TI SCI handle
1529 *
1530 * Return: 0 if all went well, else returns appropriate error value.
1531 */
1532static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1533{
1534 struct ti_sci_msg_req_reboot req;
1535 struct ti_sci_msg_hdr *resp;
1536 struct ti_sci_info *info;
1537 struct ti_sci_xfer *xfer;
1538 int ret = 0;
1539
1540 if (IS_ERR(handle))
1541 return PTR_ERR(handle);
1542 if (!handle)
1543 return -EINVAL;
1544
1545 info = handle_to_ti_sci_info(handle);
1546
1547 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1548 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1549 (u32 *)&req, sizeof(req), sizeof(*resp));
1550 if (IS_ERR(xfer)) {
1551 ret = PTR_ERR(xfer);
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301552 return ret;
1553 }
Dave Gerlachbeed3052021-05-13 20:10:55 -05001554 req.domain = 0;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301555
1556 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001557 if (ret)
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301558 return ret;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301559
1560 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1561
1562 if (!ti_sci_is_response_ack(resp))
1563 return -ENODEV;
1564
1565 return ret;
1566}
1567
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301568/**
1569 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1570 * to a host. Resource is uniquely identified by
1571 * type and subtype.
1572 * @handle: Pointer to TISCI handle.
1573 * @dev_id: TISCI device ID.
1574 * @subtype: Resource assignment subtype that is being requested
1575 * from the given device.
1576 * @s_host: Host processor ID to which the resources are allocated
1577 * @range_start: Start index of the resource range
1578 * @range_num: Number of resources in the range
1579 *
1580 * Return: 0 if all went fine, else return appropriate error.
1581 */
1582static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1583 u32 dev_id, u8 subtype, u8 s_host,
1584 u16 *range_start, u16 *range_num)
1585{
1586 struct ti_sci_msg_resp_get_resource_range *resp;
1587 struct ti_sci_msg_req_get_resource_range req;
1588 struct ti_sci_xfer *xfer;
1589 struct ti_sci_info *info;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301590 int ret = 0;
1591
1592 if (IS_ERR(handle))
1593 return PTR_ERR(handle);
1594 if (!handle)
1595 return -EINVAL;
1596
1597 info = handle_to_ti_sci_info(handle);
1598
1599 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1600 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1601 (u32 *)&req, sizeof(req), sizeof(*resp));
1602 if (IS_ERR(xfer)) {
1603 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301604 return ret;
1605 }
1606
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301607 req.secondary_host = s_host;
Lokesh Vutla4986b152020-08-17 11:00:48 +05301608 req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301609 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1610
1611 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001612 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301613 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301614
1615 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1616 if (!ti_sci_is_response_ack(resp)) {
1617 ret = -ENODEV;
1618 } else if (!resp->range_start && !resp->range_num) {
1619 ret = -ENODEV;
1620 } else {
1621 *range_start = resp->range_start;
1622 *range_num = resp->range_num;
1623 };
1624
1625fail:
1626 return ret;
1627}
1628
Vignesh Raghavendra0e811582021-06-07 19:47:48 +05301629static int __maybe_unused
Vignesh Raghavendra5d5a6992021-06-07 19:47:49 +05301630ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1631 u32 dev_id, u8 subtype,
1632 u16 *range_start, u16 *range_num)
Vignesh Raghavendra0e811582021-06-07 19:47:48 +05301633{
1634 struct ti_sci_resource_static_data *data;
1635 int i = 0;
1636
1637 while (1) {
1638 data = &rm_static_data[i];
1639
1640 if (!data->dev_id)
1641 return -EINVAL;
1642
1643 if (data->dev_id != dev_id || data->subtype != subtype) {
1644 i++;
1645 continue;
1646 }
1647
1648 *range_start = data->range_start;
1649 *range_num = data->range_num;
1650
1651 return 0;
1652 }
1653
1654 return -EINVAL;
1655}
1656
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301657/**
1658 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1659 * that is same as ti sci interface host.
1660 * @handle: Pointer to TISCI handle.
1661 * @dev_id: TISCI device ID.
1662 * @subtype: Resource assignment subtype that is being requested
1663 * from the given device.
1664 * @range_start: Start index of the resource range
1665 * @range_num: Number of resources in the range
1666 *
1667 * Return: 0 if all went fine, else return appropriate error.
1668 */
1669static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1670 u32 dev_id, u8 subtype,
1671 u16 *range_start, u16 *range_num)
1672{
1673 return ti_sci_get_resource_range(handle, dev_id, subtype,
1674 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1675 range_start, range_num);
1676}
1677
1678/**
1679 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1680 * assigned to a specified host.
1681 * @handle: Pointer to TISCI handle.
1682 * @dev_id: TISCI device ID.
1683 * @subtype: Resource assignment subtype that is being requested
1684 * from the given device.
1685 * @s_host: Host processor ID to which the resources are allocated
1686 * @range_start: Start index of the resource range
1687 * @range_num: Number of resources in the range
1688 *
1689 * Return: 0 if all went fine, else return appropriate error.
1690 */
1691static
1692int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1693 u32 dev_id, u8 subtype, u8 s_host,
1694 u16 *range_start, u16 *range_num)
1695{
1696 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1697 range_start, range_num);
1698}
1699
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301700/**
Lokesh Vutla826eb742019-03-08 11:47:32 +05301701 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1702 * @handle: pointer to TI SCI handle
1703 * @msms_start: MSMC start as returned by tisci
1704 * @msmc_end: MSMC end as returned by tisci
1705 *
1706 * Return: 0 if all went well, else returns appropriate error value.
1707 */
1708static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1709 u64 *msmc_start, u64 *msmc_end)
1710{
1711 struct ti_sci_msg_resp_query_msmc *resp;
1712 struct ti_sci_msg_hdr req;
1713 struct ti_sci_info *info;
1714 struct ti_sci_xfer *xfer;
1715 int ret = 0;
1716
1717 if (IS_ERR(handle))
1718 return PTR_ERR(handle);
1719 if (!handle)
1720 return -EINVAL;
1721
1722 info = handle_to_ti_sci_info(handle);
1723
1724 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1725 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1726 (u32 *)&req, sizeof(req), sizeof(*resp));
1727 if (IS_ERR(xfer)) {
1728 ret = PTR_ERR(xfer);
Lokesh Vutla826eb742019-03-08 11:47:32 +05301729 return ret;
1730 }
1731
1732 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001733 if (ret)
Lokesh Vutla826eb742019-03-08 11:47:32 +05301734 return ret;
Lokesh Vutla826eb742019-03-08 11:47:32 +05301735
1736 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1737
1738 if (!ti_sci_is_response_ack(resp))
1739 return -ENODEV;
1740
1741 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1742 resp->msmc_start_low;
1743 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1744 resp->msmc_end_low;
1745
1746 return ret;
1747}
1748
1749/**
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301750 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1751 * @handle: Pointer to TI SCI handle
1752 * @proc_id: Processor ID this request is for
1753 *
1754 * Return: 0 if all went well, else returns appropriate error value.
1755 */
1756static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1757 u8 proc_id)
1758{
1759 struct ti_sci_msg_req_proc_request req;
1760 struct ti_sci_msg_hdr *resp;
1761 struct ti_sci_info *info;
1762 struct ti_sci_xfer *xfer;
1763 int ret = 0;
1764
1765 if (IS_ERR(handle))
1766 return PTR_ERR(handle);
1767 if (!handle)
1768 return -EINVAL;
1769
1770 info = handle_to_ti_sci_info(handle);
1771
1772 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1773 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1774 (u32 *)&req, sizeof(req), sizeof(*resp));
1775 if (IS_ERR(xfer)) {
1776 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301777 return ret;
1778 }
1779 req.processor_id = proc_id;
1780
1781 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001782 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301783 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301784
1785 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1786
1787 if (!ti_sci_is_response_ack(resp))
1788 ret = -ENODEV;
1789
1790 return ret;
1791}
1792
1793/**
1794 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1795 * @handle: Pointer to TI SCI handle
1796 * @proc_id: Processor ID this request is for
1797 *
1798 * Return: 0 if all went well, else returns appropriate error value.
1799 */
1800static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1801 u8 proc_id)
1802{
1803 struct ti_sci_msg_req_proc_release req;
1804 struct ti_sci_msg_hdr *resp;
1805 struct ti_sci_info *info;
1806 struct ti_sci_xfer *xfer;
1807 int ret = 0;
1808
1809 if (IS_ERR(handle))
1810 return PTR_ERR(handle);
1811 if (!handle)
1812 return -EINVAL;
1813
1814 info = handle_to_ti_sci_info(handle);
1815
1816 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1817 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1818 (u32 *)&req, sizeof(req), sizeof(*resp));
1819 if (IS_ERR(xfer)) {
1820 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301821 return ret;
1822 }
1823 req.processor_id = proc_id;
1824
1825 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001826 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301827 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301828
1829 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1830
1831 if (!ti_sci_is_response_ack(resp))
1832 ret = -ENODEV;
1833
1834 return ret;
1835}
1836
1837/**
1838 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1839 * control to a host in the processor's access
1840 * control list.
1841 * @handle: Pointer to TI SCI handle
1842 * @proc_id: Processor ID this request is for
1843 * @host_id: Host ID to get the control of the processor
1844 *
1845 * Return: 0 if all went well, else returns appropriate error value.
1846 */
1847static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1848 u8 proc_id, u8 host_id)
1849{
1850 struct ti_sci_msg_req_proc_handover req;
1851 struct ti_sci_msg_hdr *resp;
1852 struct ti_sci_info *info;
1853 struct ti_sci_xfer *xfer;
1854 int ret = 0;
1855
1856 if (IS_ERR(handle))
1857 return PTR_ERR(handle);
1858 if (!handle)
1859 return -EINVAL;
1860
1861 info = handle_to_ti_sci_info(handle);
1862
1863 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1864 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1865 (u32 *)&req, sizeof(req), sizeof(*resp));
1866 if (IS_ERR(xfer)) {
1867 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301868 return ret;
1869 }
1870 req.processor_id = proc_id;
1871 req.host_id = host_id;
1872
1873 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001874 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301875 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301876
1877 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1878
1879 if (!ti_sci_is_response_ack(resp))
1880 ret = -ENODEV;
1881
1882 return ret;
1883}
1884
1885/**
1886 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1887 * configuration flags
1888 * @handle: Pointer to TI SCI handle
1889 * @proc_id: Processor ID this request is for
1890 * @config_flags_set: Configuration flags to be set
1891 * @config_flags_clear: Configuration flags to be cleared.
1892 *
1893 * Return: 0 if all went well, else returns appropriate error value.
1894 */
1895static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1896 u8 proc_id, u64 bootvector,
1897 u32 config_flags_set,
1898 u32 config_flags_clear)
1899{
1900 struct ti_sci_msg_req_set_proc_boot_config req;
1901 struct ti_sci_msg_hdr *resp;
1902 struct ti_sci_info *info;
1903 struct ti_sci_xfer *xfer;
1904 int ret = 0;
1905
1906 if (IS_ERR(handle))
1907 return PTR_ERR(handle);
1908 if (!handle)
1909 return -EINVAL;
1910
1911 info = handle_to_ti_sci_info(handle);
1912
1913 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1914 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1915 (u32 *)&req, sizeof(req), sizeof(*resp));
1916 if (IS_ERR(xfer)) {
1917 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301918 return ret;
1919 }
1920 req.processor_id = proc_id;
1921 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1922 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1923 TISCI_ADDR_HIGH_SHIFT;
1924 req.config_flags_set = config_flags_set;
1925 req.config_flags_clear = config_flags_clear;
1926
1927 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001928 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301929 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301930
1931 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1932
1933 if (!ti_sci_is_response_ack(resp))
1934 ret = -ENODEV;
1935
1936 return ret;
1937}
1938
1939/**
1940 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1941 * control flags
1942 * @handle: Pointer to TI SCI handle
1943 * @proc_id: Processor ID this request is for
1944 * @control_flags_set: Control flags to be set
1945 * @control_flags_clear: Control flags to be cleared
1946 *
1947 * Return: 0 if all went well, else returns appropriate error value.
1948 */
1949static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1950 u8 proc_id, u32 control_flags_set,
1951 u32 control_flags_clear)
1952{
1953 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1954 struct ti_sci_msg_hdr *resp;
1955 struct ti_sci_info *info;
1956 struct ti_sci_xfer *xfer;
1957 int ret = 0;
1958
1959 if (IS_ERR(handle))
1960 return PTR_ERR(handle);
1961 if (!handle)
1962 return -EINVAL;
1963
1964 info = handle_to_ti_sci_info(handle);
1965
1966 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1967 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1968 (u32 *)&req, sizeof(req), sizeof(*resp));
1969 if (IS_ERR(xfer)) {
1970 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301971 return ret;
1972 }
1973 req.processor_id = proc_id;
1974 req.control_flags_set = control_flags_set;
1975 req.control_flags_clear = control_flags_clear;
1976
1977 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05001978 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301979 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301980
1981 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1982
1983 if (!ti_sci_is_response_ack(resp))
1984 ret = -ENODEV;
1985
1986 return ret;
1987}
1988
1989/**
1990 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1991 * image and then set the processor configuration flags.
1992 * @handle: Pointer to TI SCI handle
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001993 * @image_addr: Memory address at which payload image and certificate is
1994 * located in memory, this is updated if the image data is
1995 * moved during authentication.
1996 * @image_size: This is updated with the final size of the image after
1997 * authentication.
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301998 *
1999 * Return: 0 if all went well, else returns appropriate error value.
2000 */
2001static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davisff6043a2019-04-12 12:54:44 -04002002 u64 *image_addr, u32 *image_size)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302003{
2004 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davisff6043a2019-04-12 12:54:44 -04002005 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302006 struct ti_sci_info *info;
2007 struct ti_sci_xfer *xfer;
2008 int ret = 0;
2009
2010 if (IS_ERR(handle))
2011 return PTR_ERR(handle);
2012 if (!handle)
2013 return -EINVAL;
2014
2015 info = handle_to_ti_sci_info(handle);
2016
2017 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
2018 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2019 (u32 *)&req, sizeof(req), sizeof(*resp));
2020 if (IS_ERR(xfer)) {
2021 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302022 return ret;
2023 }
Andrew F. Davisff6043a2019-04-12 12:54:44 -04002024 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
2025 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302026 TISCI_ADDR_HIGH_SHIFT;
2027
2028 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002029 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302030 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302031
Andrew F. Davisff6043a2019-04-12 12:54:44 -04002032 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302033
2034 if (!ti_sci_is_response_ack(resp))
Andrew F. Davisff6043a2019-04-12 12:54:44 -04002035 return -ENODEV;
2036
2037 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
2038 (((u64)resp->image_addr_high <<
2039 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2040 *image_size = resp->image_size;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302041
2042 return ret;
2043}
2044
2045/**
2046 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
2047 * @handle: Pointer to TI SCI handle
2048 * @proc_id: Processor ID this request is for
2049 *
2050 * Return: 0 if all went well, else returns appropriate error value.
2051 */
2052static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
2053 u8 proc_id, u64 *bv, u32 *cfg_flags,
2054 u32 *ctrl_flags, u32 *sts_flags)
2055{
2056 struct ti_sci_msg_resp_get_proc_boot_status *resp;
2057 struct ti_sci_msg_req_get_proc_boot_status req;
2058 struct ti_sci_info *info;
2059 struct ti_sci_xfer *xfer;
2060 int ret = 0;
2061
2062 if (IS_ERR(handle))
2063 return PTR_ERR(handle);
2064 if (!handle)
2065 return -EINVAL;
2066
2067 info = handle_to_ti_sci_info(handle);
2068
2069 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
2070 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2071 (u32 *)&req, sizeof(req), sizeof(*resp));
2072 if (IS_ERR(xfer)) {
2073 ret = PTR_ERR(xfer);
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302074 return ret;
2075 }
2076 req.processor_id = proc_id;
2077
2078 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002079 if (ret)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302080 return ret;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302081
2082 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2083 xfer->tx_message.buf;
2084
2085 if (!ti_sci_is_response_ack(resp))
2086 return -ENODEV;
2087 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2088 (((u64)resp->bootvector_high <<
2089 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2090 *cfg_flags = resp->config_flags;
2091 *ctrl_flags = resp->control_flags;
2092 *sts_flags = resp->status_flags;
2093
2094 return ret;
2095}
2096
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302097/**
Andreas Dannenberg410adcc2019-06-07 19:24:40 +05302098 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2099 * processor boot status without requesting or
2100 * waiting for a response.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
2102 * @num_wait_iterations: Total number of iterations we will check before
2103 * we will timeout and give up
2104 * @num_match_iterations: How many iterations should we have continued
2105 * status to account for status bits glitching.
2106 * This is to make sure that match occurs for
2107 * consecutive checks. This implies that the
2108 * worst case should consider that the stable
2109 * time should at the worst be num_wait_iterations
2110 * num_match_iterations to prevent timeout.
2111 * @delay_per_iteration_us: Specifies how long to wait (in micro seconds)
2112 * between each status checks. This is the minimum
2113 * duration, and overhead of register reads and
2114 * checks are on top of this and can vary based on
2115 * varied conditions.
2116 * @delay_before_iterations_us: Specifies how long to wait (in micro seconds)
2117 * before the very first check in the first
2118 * iteration of status check loop. This is the
2119 * minimum duration, and overhead of register
2120 * reads and checks are.
2121 * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the
2122 * status matching this field requested MUST be 1.
2123 * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the
2124 * bits matching this field requested MUST be 1.
2125 * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the
2126 * status matching this field requested MUST be 0.
2127 * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the
2128 * bits matching this field requested MUST be 0.
2129 *
2130 * Return: 0 if all goes well, else appropriate error message
2131 */
2132static int
2133ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2134 u8 proc_id,
2135 u8 num_wait_iterations,
2136 u8 num_match_iterations,
2137 u8 delay_per_iteration_us,
2138 u8 delay_before_iterations_us,
2139 u32 status_flags_1_set_all_wait,
2140 u32 status_flags_1_set_any_wait,
2141 u32 status_flags_1_clr_all_wait,
2142 u32 status_flags_1_clr_any_wait)
2143{
2144 struct ti_sci_msg_req_wait_proc_boot_status req;
2145 struct ti_sci_info *info;
2146 struct ti_sci_xfer *xfer;
2147 int ret = 0;
2148
2149 if (IS_ERR(handle))
2150 return PTR_ERR(handle);
2151 if (!handle)
2152 return -EINVAL;
2153
2154 info = handle_to_ti_sci_info(handle);
2155
2156 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2157 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2158 (u32 *)&req, sizeof(req), 0);
2159 if (IS_ERR(xfer)) {
2160 ret = PTR_ERR(xfer);
Andreas Dannenberg410adcc2019-06-07 19:24:40 +05302161 return ret;
2162 }
2163 req.processor_id = proc_id;
2164 req.num_wait_iterations = num_wait_iterations;
2165 req.num_match_iterations = num_match_iterations;
2166 req.delay_per_iteration_us = delay_per_iteration_us;
2167 req.delay_before_iterations_us = delay_before_iterations_us;
2168 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2169 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2170 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2171 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2172
2173 ret = ti_sci_do_xfer(info, xfer);
2174 if (ret)
Andrew Davis59178502022-07-25 20:25:03 -05002175 return ret;
Andreas Dannenberg410adcc2019-06-07 19:24:40 +05302176
2177 return ret;
2178}
2179
2180/**
2181 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2182 * requesting or waiting for a response. Note that this API call
2183 * should be followed by placing the respective processor into
2184 * either WFE or WFI mode.
2185 * @handle: Pointer to TI SCI handle
2186 * @proc_id: Processor ID this request is for
2187 *
2188 * Return: 0 if all went well, else returns appropriate error value.
2189 */
2190static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2191 u8 proc_id)
2192{
2193 int ret;
Sean Andersone5792302020-09-15 10:44:38 -04002194 struct ti_sci_info *info;
2195
2196 if (IS_ERR(handle))
2197 return PTR_ERR(handle);
2198 if (!handle)
2199 return -EINVAL;
2200
2201 info = handle_to_ti_sci_info(handle);
Andreas Dannenberg410adcc2019-06-07 19:24:40 +05302202
2203 /*
2204 * Send the core boot status wait message waiting for either WFE or
2205 * WFI without requesting or waiting for a TISCI response with the
2206 * maximum wait time to give us the best chance to get to the WFE/WFI
2207 * command that should follow the invocation of this API before the
2208 * DMSC-internal processing of this command times out. Note that
2209 * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2210 * core as the related flag bit positions are the same.
2211 */
2212 ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2213 U8_MAX, 100, U8_MAX, U8_MAX,
2214 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2215 0, 0);
2216 if (ret) {
2217 dev_err(info->dev, "Sending core %u wait message fail %d\n",
2218 proc_id, ret);
2219 return ret;
2220 }
2221
2222 /*
2223 * Release a processor managed by TISCI without requesting or waiting
2224 * for a response.
2225 */
2226 ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2227 MSG_DEVICE_SW_STATE_AUTO_OFF);
2228 if (ret)
2229 dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2230 proc_id, ret);
2231
2232 return ret;
2233}
2234
2235/**
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302236 * ti_sci_cmd_ring_config() - configure RA ring
2237 * @handle: pointer to TI SCI handle
2238 * @valid_params: Bitfield defining validity of ring configuration parameters.
2239 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2240 * @index: Ring index.
2241 * @addr_lo: The ring base address lo 32 bits
2242 * @addr_hi: The ring base address hi 32 bits
2243 * @count: Number of ring elements.
2244 * @mode: The mode of the ring
2245 * @size: The ring element size.
2246 * @order_id: Specifies the ring's bus order ID.
2247 *
2248 * Return: 0 if all went well, else returns appropriate error value.
2249 *
2250 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2251 */
2252static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2253 u32 valid_params, u16 nav_id, u16 index,
2254 u32 addr_lo, u32 addr_hi, u32 count,
2255 u8 mode, u8 size, u8 order_id)
2256{
2257 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2258 struct ti_sci_msg_rm_ring_cfg_req req;
2259 struct ti_sci_xfer *xfer;
2260 struct ti_sci_info *info;
2261 int ret = 0;
2262
2263 if (IS_ERR(handle))
2264 return PTR_ERR(handle);
2265 if (!handle)
2266 return -EINVAL;
2267
2268 info = handle_to_ti_sci_info(handle);
2269
2270 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2271 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2272 (u32 *)&req, sizeof(req), sizeof(*resp));
2273 if (IS_ERR(xfer)) {
2274 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302275 return ret;
2276 }
2277 req.valid_params = valid_params;
2278 req.nav_id = nav_id;
2279 req.index = index;
2280 req.addr_lo = addr_lo;
2281 req.addr_hi = addr_hi;
2282 req.count = count;
2283 req.mode = mode;
2284 req.size = size;
2285 req.order_id = order_id;
2286
2287 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002288 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302289 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302290
2291 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2292
2293 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2294
2295fail:
2296 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2297 return ret;
2298}
2299
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302300static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2301 u32 nav_id, u32 src_thread, u32 dst_thread)
2302{
2303 struct ti_sci_msg_hdr *resp;
2304 struct ti_sci_msg_psil_pair req;
2305 struct ti_sci_xfer *xfer;
2306 struct ti_sci_info *info;
2307 int ret = 0;
2308
2309 if (IS_ERR(handle))
2310 return PTR_ERR(handle);
2311 if (!handle)
2312 return -EINVAL;
2313
2314 info = handle_to_ti_sci_info(handle);
2315
2316 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2317 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2318 (u32 *)&req, sizeof(req), sizeof(*resp));
2319 if (IS_ERR(xfer)) {
2320 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302321 return ret;
2322 }
2323 req.nav_id = nav_id;
2324 req.src_thread = src_thread;
2325 req.dst_thread = dst_thread;
2326
2327 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002328 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302329 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302330
2331 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2332 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2333
2334fail:
2335 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2336 nav_id, src_thread, dst_thread, ret);
2337 return ret;
2338}
2339
2340static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2341 u32 nav_id, u32 src_thread, u32 dst_thread)
2342{
2343 struct ti_sci_msg_hdr *resp;
2344 struct ti_sci_msg_psil_unpair req;
2345 struct ti_sci_xfer *xfer;
2346 struct ti_sci_info *info;
2347 int ret = 0;
2348
2349 if (IS_ERR(handle))
2350 return PTR_ERR(handle);
2351 if (!handle)
2352 return -EINVAL;
2353
2354 info = handle_to_ti_sci_info(handle);
2355
2356 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2357 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2358 (u32 *)&req, sizeof(req), sizeof(*resp));
2359 if (IS_ERR(xfer)) {
2360 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302361 return ret;
2362 }
2363 req.nav_id = nav_id;
2364 req.src_thread = src_thread;
2365 req.dst_thread = dst_thread;
2366
2367 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002368 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302369 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302370
2371 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2372 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2373
2374fail:
2375 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2376 src_thread, dst_thread, ret);
2377 return ret;
2378}
2379
2380static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2381 const struct ti_sci_handle *handle,
2382 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2383{
2384 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2385 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2386 struct ti_sci_xfer *xfer;
2387 struct ti_sci_info *info;
2388 int ret = 0;
2389
2390 if (IS_ERR(handle))
2391 return PTR_ERR(handle);
2392 if (!handle)
2393 return -EINVAL;
2394
2395 info = handle_to_ti_sci_info(handle);
2396
2397 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2398 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2399 (u32 *)&req, sizeof(req), sizeof(*resp));
2400 if (IS_ERR(xfer)) {
2401 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302402 return ret;
2403 }
2404 req.valid_params = params->valid_params;
2405 req.nav_id = params->nav_id;
2406 req.index = params->index;
2407 req.tx_pause_on_err = params->tx_pause_on_err;
2408 req.tx_filt_einfo = params->tx_filt_einfo;
2409 req.tx_filt_pswords = params->tx_filt_pswords;
2410 req.tx_atype = params->tx_atype;
2411 req.tx_chan_type = params->tx_chan_type;
2412 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2413 req.tx_fetch_size = params->tx_fetch_size;
2414 req.tx_credit_count = params->tx_credit_count;
2415 req.txcq_qnum = params->txcq_qnum;
2416 req.tx_priority = params->tx_priority;
2417 req.tx_qos = params->tx_qos;
2418 req.tx_orderid = params->tx_orderid;
2419 req.fdepth = params->fdepth;
2420 req.tx_sched_priority = params->tx_sched_priority;
Vignesh Raghavendra91f1e792021-05-10 20:06:02 +05302421 req.tx_burst_size = params->tx_burst_size;
2422 req.tx_tdtype = params->tx_tdtype;
2423 req.extended_ch_type = params->extended_ch_type;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302424
2425 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002426 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302427 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302428
2429 resp =
2430 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2431 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2432
2433fail:
2434 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2435 return ret;
2436}
2437
2438static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2439 const struct ti_sci_handle *handle,
2440 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2441{
2442 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2443 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2444 struct ti_sci_xfer *xfer;
2445 struct ti_sci_info *info;
2446 int ret = 0;
2447
2448 if (IS_ERR(handle))
2449 return PTR_ERR(handle);
2450 if (!handle)
2451 return -EINVAL;
2452
2453 info = handle_to_ti_sci_info(handle);
2454
2455 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2456 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2457 (u32 *)&req, sizeof(req), sizeof(*resp));
2458 if (IS_ERR(xfer)) {
2459 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302460 return ret;
2461 }
2462
2463 req.valid_params = params->valid_params;
2464 req.nav_id = params->nav_id;
2465 req.index = params->index;
2466 req.rx_fetch_size = params->rx_fetch_size;
2467 req.rxcq_qnum = params->rxcq_qnum;
2468 req.rx_priority = params->rx_priority;
2469 req.rx_qos = params->rx_qos;
2470 req.rx_orderid = params->rx_orderid;
2471 req.rx_sched_priority = params->rx_sched_priority;
2472 req.flowid_start = params->flowid_start;
2473 req.flowid_cnt = params->flowid_cnt;
2474 req.rx_pause_on_err = params->rx_pause_on_err;
2475 req.rx_atype = params->rx_atype;
2476 req.rx_chan_type = params->rx_chan_type;
2477 req.rx_ignore_short = params->rx_ignore_short;
2478 req.rx_ignore_long = params->rx_ignore_long;
2479
2480 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002481 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302482 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302483
2484 resp =
2485 (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2486 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2487
2488fail:
2489 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2490 return ret;
2491}
2492
2493static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2494 const struct ti_sci_handle *handle,
2495 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2496{
2497 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2498 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2499 struct ti_sci_xfer *xfer;
2500 struct ti_sci_info *info;
2501 int ret = 0;
2502
2503 if (IS_ERR(handle))
2504 return PTR_ERR(handle);
2505 if (!handle)
2506 return -EINVAL;
2507
2508 info = handle_to_ti_sci_info(handle);
2509
2510 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2511 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2512 (u32 *)&req, sizeof(req), sizeof(*resp));
2513 if (IS_ERR(xfer)) {
2514 ret = PTR_ERR(xfer);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302515 return ret;
2516 }
2517
2518 req.valid_params = params->valid_params;
2519 req.nav_id = params->nav_id;
2520 req.flow_index = params->flow_index;
2521 req.rx_einfo_present = params->rx_einfo_present;
2522 req.rx_psinfo_present = params->rx_psinfo_present;
2523 req.rx_error_handling = params->rx_error_handling;
2524 req.rx_desc_type = params->rx_desc_type;
2525 req.rx_sop_offset = params->rx_sop_offset;
2526 req.rx_dest_qnum = params->rx_dest_qnum;
2527 req.rx_src_tag_hi = params->rx_src_tag_hi;
2528 req.rx_src_tag_lo = params->rx_src_tag_lo;
2529 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2530 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2531 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2532 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2533 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2534 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2535 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2536 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2537 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2538 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2539 req.rx_ps_location = params->rx_ps_location;
2540
2541 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002542 if (ret)
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302543 goto fail;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302544
2545 resp =
2546 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2547 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2548
2549fail:
2550 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2551 return ret;
2552}
2553
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002554/**
2555 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2556 * @handle: pointer to TI SCI handle
2557 * @region: region configuration parameters
2558 *
2559 * Return: 0 if all went well, else returns appropriate error value.
2560 */
2561static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2562 const struct ti_sci_msg_fwl_region *region)
2563{
2564 struct ti_sci_msg_fwl_set_firewall_region_req req;
2565 struct ti_sci_msg_hdr *resp;
2566 struct ti_sci_info *info;
2567 struct ti_sci_xfer *xfer;
2568 int ret = 0;
2569
2570 if (IS_ERR(handle))
2571 return PTR_ERR(handle);
2572 if (!handle)
2573 return -EINVAL;
2574
2575 info = handle_to_ti_sci_info(handle);
2576
2577 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2578 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2579 (u32 *)&req, sizeof(req), sizeof(*resp));
2580 if (IS_ERR(xfer)) {
2581 ret = PTR_ERR(xfer);
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002582 return ret;
2583 }
2584
2585 req.fwl_id = region->fwl_id;
2586 req.region = region->region;
2587 req.n_permission_regs = region->n_permission_regs;
2588 req.control = region->control;
2589 req.permissions[0] = region->permissions[0];
2590 req.permissions[1] = region->permissions[1];
2591 req.permissions[2] = region->permissions[2];
2592 req.start_address = region->start_address;
2593 req.end_address = region->end_address;
2594
2595 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002596 if (ret)
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002597 return ret;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002598
2599 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2600
2601 if (!ti_sci_is_response_ack(resp))
2602 return -ENODEV;
2603
2604 return 0;
2605}
2606
2607/**
2608 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2609 * @handle: pointer to TI SCI handle
2610 * @region: region configuration parameters
2611 *
2612 * Return: 0 if all went well, else returns appropriate error value.
2613 */
2614static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2615 struct ti_sci_msg_fwl_region *region)
2616{
2617 struct ti_sci_msg_fwl_get_firewall_region_req req;
2618 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2619 struct ti_sci_info *info;
2620 struct ti_sci_xfer *xfer;
2621 int ret = 0;
2622
2623 if (IS_ERR(handle))
2624 return PTR_ERR(handle);
2625 if (!handle)
2626 return -EINVAL;
2627
2628 info = handle_to_ti_sci_info(handle);
2629
2630 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2631 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2632 (u32 *)&req, sizeof(req), sizeof(*resp));
2633 if (IS_ERR(xfer)) {
2634 ret = PTR_ERR(xfer);
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002635 return ret;
2636 }
2637
2638 req.fwl_id = region->fwl_id;
2639 req.region = region->region;
2640 req.n_permission_regs = region->n_permission_regs;
2641
2642 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002643 if (ret)
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002644 return ret;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002645
2646 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2647
2648 if (!ti_sci_is_response_ack(resp))
2649 return -ENODEV;
2650
2651 region->fwl_id = resp->fwl_id;
2652 region->region = resp->region;
2653 region->n_permission_regs = resp->n_permission_regs;
2654 region->control = resp->control;
2655 region->permissions[0] = resp->permissions[0];
2656 region->permissions[1] = resp->permissions[1];
2657 region->permissions[2] = resp->permissions[2];
2658 region->start_address = resp->start_address;
2659 region->end_address = resp->end_address;
2660
2661 return 0;
2662}
2663
2664/**
2665 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2666 * @handle: pointer to TI SCI handle
2667 * @region: region configuration parameters
2668 *
2669 * Return: 0 if all went well, else returns appropriate error value.
2670 */
2671static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2672 struct ti_sci_msg_fwl_owner *owner)
2673{
2674 struct ti_sci_msg_fwl_change_owner_info_req req;
2675 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2676 struct ti_sci_info *info;
2677 struct ti_sci_xfer *xfer;
2678 int ret = 0;
2679
2680 if (IS_ERR(handle))
2681 return PTR_ERR(handle);
2682 if (!handle)
2683 return -EINVAL;
2684
2685 info = handle_to_ti_sci_info(handle);
2686
Andrew F. Davisefbfd442019-04-29 09:04:11 -04002687 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2688 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002689 (u32 *)&req, sizeof(req), sizeof(*resp));
2690 if (IS_ERR(xfer)) {
2691 ret = PTR_ERR(xfer);
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002692 return ret;
2693 }
2694
2695 req.fwl_id = owner->fwl_id;
2696 req.region = owner->region;
2697 req.owner_index = owner->owner_index;
2698
2699 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis59178502022-07-25 20:25:03 -05002700 if (ret)
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002701 return ret;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002702
2703 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2704
2705 if (!ti_sci_is_response_ack(resp))
2706 return -ENODEV;
2707
2708 owner->fwl_id = resp->fwl_id;
2709 owner->region = resp->region;
2710 owner->owner_index = resp->owner_index;
2711 owner->owner_privid = resp->owner_privid;
2712 owner->owner_permission_bits = resp->owner_permission_bits;
2713
2714 return ret;
2715}
2716
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302717/*
2718 * ti_sci_setup_ops() - Setup the operations structures
2719 * @info: pointer to TISCI pointer
2720 */
2721static void ti_sci_setup_ops(struct ti_sci_info *info)
2722{
2723 struct ti_sci_ops *ops = &info->handle.ops;
2724 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302725 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302726 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302727 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302728 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302729 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302730 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2731 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2732 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002733 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302734
2735 bops->board_config = ti_sci_cmd_set_board_config;
2736 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2737 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2738 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302739
2740 dops->get_device = ti_sci_cmd_get_device;
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +05302741 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302742 dops->idle_device = ti_sci_cmd_idle_device;
Lokesh Vutlaae0b8a22019-06-07 19:24:39 +05302743 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302744 dops->put_device = ti_sci_cmd_put_device;
2745 dops->is_valid = ti_sci_cmd_dev_is_valid;
2746 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2747 dops->is_idle = ti_sci_cmd_dev_is_idle;
2748 dops->is_stop = ti_sci_cmd_dev_is_stop;
2749 dops->is_on = ti_sci_cmd_dev_is_on;
2750 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2751 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2752 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla9566b772019-06-07 19:24:41 +05302753 dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302754
2755 cops->get_clock = ti_sci_cmd_get_clock;
2756 cops->idle_clock = ti_sci_cmd_idle_clock;
2757 cops->put_clock = ti_sci_cmd_put_clock;
2758 cops->is_auto = ti_sci_cmd_clk_is_auto;
2759 cops->is_on = ti_sci_cmd_clk_is_on;
2760 cops->is_off = ti_sci_cmd_clk_is_off;
2761
2762 cops->set_parent = ti_sci_cmd_clk_set_parent;
2763 cops->get_parent = ti_sci_cmd_clk_get_parent;
2764 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2765
2766 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2767 cops->set_freq = ti_sci_cmd_clk_set_freq;
2768 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302769
2770 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutla826eb742019-03-08 11:47:32 +05302771 core_ops->query_msmc = ti_sci_cmd_query_msmc;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302772
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302773 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2774 rm_core_ops->get_range_from_shost =
2775 ti_sci_cmd_get_resource_range_from_shost;
2776
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302777 pops->proc_request = ti_sci_cmd_proc_request;
2778 pops->proc_release = ti_sci_cmd_proc_release;
2779 pops->proc_handover = ti_sci_cmd_proc_handover;
2780 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2781 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2782 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2783 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Andreas Dannenberg410adcc2019-06-07 19:24:40 +05302784 pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302785
2786 rops->config = ti_sci_cmd_ring_config;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302787
2788 psilops->pair = ti_sci_cmd_rm_psil_pair;
2789 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2790
2791 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2792 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2793 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002794
2795 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2796 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2797 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302798}
2799
2800/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302801 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2802 * @dev: Pointer to the SYSFW device
2803 *
2804 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2805 * are encountered.
2806 */
2807const
2808struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2809{
2810 if (!sci_dev)
2811 return ERR_PTR(-EINVAL);
2812
2813 struct ti_sci_info *info = dev_get_priv(sci_dev);
2814
2815 if (!info)
2816 return ERR_PTR(-EINVAL);
2817
2818 struct ti_sci_handle *handle = &info->handle;
2819
2820 if (!handle)
2821 return ERR_PTR(-EINVAL);
2822
2823 return handle;
2824}
2825
2826/**
2827 * ti_sci_get_handle() - Get the TI SCI handle for a device
2828 * @dev: Pointer to device for which we want SCI handle
2829 *
2830 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2831 * are encountered.
2832 */
2833const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2834{
2835 if (!dev)
2836 return ERR_PTR(-EINVAL);
2837
2838 struct udevice *sci_dev = dev_get_parent(dev);
2839
2840 return ti_sci_get_handle_from_sysfw(sci_dev);
2841}
2842
2843/**
2844 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2845 * @dev: device node
2846 * @propname: property name containing phandle on TISCI node
2847 *
2848 * Return: pointer to handle if successful, else appropriate error value.
2849 */
2850const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2851 const char *property)
2852{
2853 struct ti_sci_info *entry, *info = NULL;
2854 u32 phandle, err;
2855 ofnode node;
2856
2857 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2858 if (err)
2859 return ERR_PTR(err);
2860
2861 node = ofnode_get_by_phandle(phandle);
2862 if (!ofnode_valid(node))
2863 return ERR_PTR(-EINVAL);
2864
2865 list_for_each_entry(entry, &ti_sci_list, list)
2866 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2867 info = entry;
2868 break;
2869 }
2870
2871 if (!info)
2872 return ERR_PTR(-ENODEV);
2873
2874 return &info->handle;
2875}
2876
2877/**
2878 * ti_sci_of_to_info() - generate private data from device tree
2879 * @dev: corresponding system controller interface device
2880 * @info: pointer to driver specific private data
2881 *
2882 * Return: 0 if all goes good, else appropriate error message.
2883 */
2884static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2885{
2886 int ret;
2887
2888 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2889 if (ret) {
2890 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2891 __func__, ret);
2892 return ret;
2893 }
2894
2895 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2896 if (ret) {
2897 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2898 __func__, ret);
2899 return ret;
2900 }
2901
2902 /* Notify channel is optional. Enable only if populated */
2903 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2904 if (ret) {
2905 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2906 __func__, ret);
2907 }
2908
2909 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302910 info->desc->default_host_id);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302911
2912 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2913
2914 return 0;
2915}
2916
2917/**
2918 * ti_sci_probe() - Basic probe
2919 * @dev: corresponding system controller interface device
2920 *
2921 * Return: 0 if all goes good, else appropriate error message.
2922 */
2923static int ti_sci_probe(struct udevice *dev)
2924{
2925 struct ti_sci_info *info;
2926 int ret;
2927
2928 debug("%s(dev=%p)\n", __func__, dev);
2929
2930 info = dev_get_priv(dev);
2931 info->desc = (void *)dev_get_driver_data(dev);
2932
2933 ret = ti_sci_of_to_info(dev, info);
2934 if (ret) {
2935 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2936 return ret;
2937 }
2938
2939 info->dev = dev;
2940 info->seq = 0xA;
2941
2942 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302943 ti_sci_setup_ops(info);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302944
2945 ret = ti_sci_cmd_get_revision(&info->handle);
2946
Lokesh Vutla9566b772019-06-07 19:24:41 +05302947 INIT_LIST_HEAD(&info->dev_list);
2948
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302949 return ret;
2950}
2951
Vignesh Raghavendra5d5a6992021-06-07 19:47:49 +05302952/**
2953 * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
2954 * @dev: corresponding system controller interface device
2955 *
2956 * Return: 0 if all goes good, else appropriate error message.
2957 */
2958static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
2959{
2960 struct ti_sci_rm_core_ops *rm_core_ops;
2961 struct ti_sci_rm_udmap_ops *udmap_ops;
2962 struct ti_sci_rm_ringacc_ops *rops;
2963 struct ti_sci_rm_psil_ops *psilops;
2964 struct ti_sci_ops *ops;
2965 struct ti_sci_info *info;
2966 int ret;
2967
2968 debug("%s(dev=%p)\n", __func__, dev);
2969
2970 info = dev_get_priv(dev);
2971 info->desc = (void *)dev_get_driver_data(dev);
2972
2973 ret = ti_sci_of_to_info(dev, info);
2974 if (ret) {
2975 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2976 return ret;
2977 }
2978
2979 info->dev = dev;
2980 info->seq = 0xA;
2981
2982 list_add_tail(&info->list, &ti_sci_list);
2983
2984 ops = &info->handle.ops;
2985
2986 rm_core_ops = &ops->rm_core_ops;
2987 rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
2988
2989 rops = &ops->rm_ring_ops;
2990 rops->config = ti_sci_cmd_ring_config;
2991
2992 psilops = &ops->rm_psil_ops;
2993 psilops->pair = ti_sci_cmd_rm_psil_pair;
2994 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2995
2996 udmap_ops = &ops->rm_udmap_ops;
2997 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2998 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2999 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3000
3001 return ret;
3002}
3003
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303004/*
3005 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3006 * @res: Pointer to the TISCI resource
3007 *
3008 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3009 */
3010u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3011{
3012 u16 set, free_bit;
3013
3014 for (set = 0; set < res->sets; set++) {
3015 free_bit = find_first_zero_bit(res->desc[set].res_map,
3016 res->desc[set].num);
3017 if (free_bit != res->desc[set].num) {
3018 set_bit(free_bit, res->desc[set].res_map);
3019 return res->desc[set].start + free_bit;
3020 }
3021 }
3022
3023 return TI_SCI_RESOURCE_NULL;
3024}
3025
3026/**
3027 * ti_sci_release_resource() - Release a resource from TISCI resource.
3028 * @res: Pointer to the TISCI resource
3029 */
3030void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3031{
3032 u16 set;
3033
3034 for (set = 0; set < res->sets; set++) {
3035 if (res->desc[set].start <= id &&
3036 (res->desc[set].num + res->desc[set].start) > id)
3037 clear_bit(id - res->desc[set].start,
3038 res->desc[set].res_map);
3039 }
3040}
3041
3042/**
3043 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3044 * @handle: TISCI handle
3045 * @dev: Device pointer to which the resource is assigned
3046 * @of_prop: property name by which the resource are represented
3047 *
3048 * Note: This function expects of_prop to be in the form of tuples
3049 * <type, subtype>. Allocates and initializes ti_sci_resource structure
3050 * for each of_prop. Client driver can directly call
3051 * ti_sci_(get_free, release)_resource apis for handling the resource.
3052 *
3053 * Return: Pointer to ti_sci_resource if all went well else appropriate
3054 * error pointer.
3055 */
3056struct ti_sci_resource *
3057devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3058 struct udevice *dev, u32 dev_id, char *of_prop)
3059{
3060 u32 resource_subtype;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303061 struct ti_sci_resource *res;
Vignesh Raghavendrac659a972019-08-05 12:26:44 -05003062 bool valid_set = false;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303063 int sets, i, ret;
3064 u32 *temp;
3065
3066 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3067 if (!res)
3068 return ERR_PTR(-ENOMEM);
3069
3070 sets = dev_read_size(dev, of_prop);
3071 if (sets < 0) {
3072 dev_err(dev, "%s resource type ids not available\n", of_prop);
3073 return ERR_PTR(sets);
3074 }
3075 temp = malloc(sets);
3076 sets /= sizeof(u32);
3077 res->sets = sets;
3078
3079 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3080 GFP_KERNEL);
3081 if (!res->desc)
3082 return ERR_PTR(-ENOMEM);
3083
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303084 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
3085 if (ret)
3086 return ERR_PTR(-EINVAL);
3087
3088 for (i = 0; i < res->sets; i++) {
3089 resource_subtype = temp[i];
3090 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3091 resource_subtype,
3092 &res->desc[i].start,
3093 &res->desc[i].num);
3094 if (ret) {
Vignesh Raghavendrac659a972019-08-05 12:26:44 -05003095 dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
Lokesh Vutla4986b152020-08-17 11:00:48 +05303096 dev_id, resource_subtype,
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303097 handle_to_ti_sci_info(handle)->host_id);
Vignesh Raghavendrac659a972019-08-05 12:26:44 -05003098 res->desc[i].start = 0;
3099 res->desc[i].num = 0;
3100 continue;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303101 }
3102
Vignesh Raghavendrac659a972019-08-05 12:26:44 -05003103 valid_set = true;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303104 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
Lokesh Vutla4986b152020-08-17 11:00:48 +05303105 dev_id, resource_subtype, res->desc[i].start,
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303106 res->desc[i].num);
3107
3108 res->desc[i].res_map =
3109 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3110 sizeof(*res->desc[i].res_map), GFP_KERNEL);
3111 if (!res->desc[i].res_map)
3112 return ERR_PTR(-ENOMEM);
3113 }
3114
Vignesh Raghavendrac659a972019-08-05 12:26:44 -05003115 if (valid_set)
3116 return res;
3117
3118 return ERR_PTR(-EINVAL);
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05303119}
3120
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	/* Host ID used when DT does not provide "ti,host-id" */
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};
3130
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	/* Host ID used when DT does not provide "ti,host-id" */
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/*
	 * Limited by MBOX_TX_QUEUE_LEN.
	 * NOTE(review): the "K2G can handle up to 128 messages" wording looks
	 * copy-pasted from the K2G descriptor above — confirm the AM654
	 * firmware-side limit.
	 */
	.max_msgs = 20,
	.max_msg_size = 60,
};
3140
/* Description for J721e DM to DMSC communication */
static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
	/* Host ID used when DT does not provide "ti,host-id" */
	.default_host_id = 3,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	.max_msgs = 20,
	.max_msg_size = 60,
};
3148
/* Compatibles served by the generic TISCI driver; .data holds the SoC desc */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
3160
/* Compatibles served by the DM-to-TIFS variant (see ti_sci_dm_probe) */
static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
	{
		.compatible = "ti,j721e-dm-sci",
		.data = (ulong)&ti_sci_dm_j721e_desc
	},
	{ /* Sentinel */ },
};
3168
/* Generic TISCI firmware driver registration */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* DM core allocates per-device ti_sci_info before probe */
	.priv_auto = sizeof(struct ti_sci_info),
};

#if IS_ENABLED(CONFIG_K3_DM_FW)
/*
 * Alternate registration used when CONFIG_K3_DM_FW is enabled: probes via
 * ti_sci_dm_probe(), which installs only the resource-management ops backed
 * by static data instead of the full TISCI ops table.
 */
U_BOOT_DRIVER(ti_sci_dm) = {
	.name = "ti_sci_dm",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_dm_ids,
	.probe = ti_sci_dm_probe,
	.priv_auto = sizeof(struct ti_sci_info),
};
#endif