// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#define LOG_CATEGORY UCLASS_RESET

#include <common.h>
#include <dm.h>
#include <fdtdec.h>
#include <log.h>
#include <malloc.h>
#include <reset.h>
#include <reset-uclass.h>
#include <dm/devres.h>
#include <dm/lists.h>

static inline struct reset_ops *reset_dev_ops(struct udevice *dev)
{
	return (struct reset_ops *)dev->driver->ops;
}

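/*
 * Default specifier translation: a one-cell "resets" entry whose single
 * cell is the provider-specific reset ID.
 */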
static int reset_of_xlate_default(struct reset_ctl *reset_ctl,
				  struct ofnode_phandle_args *args)
{
	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	if (args->args_count != 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	reset_ctl->id = args->args[0];

	return 0;
}

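/*
 * Common tail of the by-index lookups: bind the phandle target to its
 * reset controller device, translate the specifier into reset_ctl and
 * let the provider accept the request.
 */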
static int reset_get_by_index_tail(int ret, ofnode node,
				   struct ofnode_phandle_args *args,
				   const char *list_name, int index,
				   struct reset_ctl *reset_ctl)
{
	struct udevice *dev_reset;
	struct reset_ops *ops;

	assert(reset_ctl);
	reset_ctl->dev = NULL;
	if (ret)
		return ret;

	ret = uclass_get_device_by_ofnode(UCLASS_RESET, args->node,
					  &dev_reset);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode() failed: %d\n",
		      __func__, ret);
		debug("%s %d\n", ofnode_get_name(args->node), args->args[0]);
		return ret;
	}
	ops = reset_dev_ops(dev_reset);

	reset_ctl->dev = dev_reset;
	if (ops->of_xlate)
		ret = ops->of_xlate(reset_ctl, args);
	else
		ret = reset_of_xlate_default(reset_ctl, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	ret = ops->request ? ops->request(reset_ctl) : 0;
	if (ret) {
		debug("ops->request() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

int reset_get_by_index(struct udevice *dev, int index,
		       struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "resets", "#reset-cells", 0,
					 index, &args);

	return reset_get_by_index_tail(ret, dev_ofnode(dev), &args, "resets",
				       index > 0, reset_ctl);
}

int reset_get_by_index_nodev(ofnode node, int index,
			     struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "resets", "#reset-cells", 0,
					     index, &args);

	return reset_get_by_index_tail(ret, node, &args, "resets",
				       index > 0, reset_ctl);
}

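/*
 * Fetch every reset listed in the node's "resets" property; on failure,
 * release whatever has already been requested.
 */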
static int __reset_get_bulk(struct udevice *dev, ofnode node,
			    struct reset_ctl_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = ofnode_count_phandle_with_args(node, "resets", "#reset-cells",
					       0);
	if (count < 1)
		return count;

	bulk->resets = devm_kcalloc(dev, count, sizeof(struct reset_ctl),
				    GFP_KERNEL);
	if (!bulk->resets)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = reset_get_by_index_nodev(node, i, &bulk->resets[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = reset_release_all(bulk->resets, bulk->count);
	if (err)
		debug("%s: could not release all resets for %p\n",
		      __func__, dev);

	return ret;
}

int reset_get_bulk(struct udevice *dev, struct reset_ctl_bulk *bulk)
{
	return __reset_get_bulk(dev, dev_ofnode(dev), bulk);
}

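/*
 * Illustrative sketch only (not part of this file): a typical consumer
 * driver whose node carries "resets"/"reset-names" properties might do
 * something like the following; the "ahb" name and the delay length are
 * made-up example values:
 *
 *	struct reset_ctl rst;
 *	int ret;
 *
 *	ret = reset_get_by_name(dev, "ahb", &rst);
 *	if (ret)
 *		return ret;
 *	reset_assert(&rst);
 *	udelay(2);
 *	reset_deassert(&rst);
 */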
int reset_get_by_name(struct udevice *dev, const char *name,
		      struct reset_ctl *reset_ctl)
{
	int index = 0;

	debug("%s(dev=%p, name=%s, reset_ctl=%p)\n", __func__, dev, name,
	      reset_ctl);
	reset_ctl->dev = NULL;

	if (name) {
		index = dev_read_stringlist_search(dev, "reset-names", name);
		if (index < 0) {
			debug("dev_read_stringlist_search() failed: %d\n",
			      index);
			return index;
		}
	}

	return reset_get_by_index(dev, index, reset_ctl);
}

int reset_request(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->request ? ops->request(reset_ctl) : 0;
}

int reset_free(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rfree ? ops->rfree(reset_ctl) : 0;
}

int reset_assert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_assert ? ops->rst_assert(reset_ctl) : 0;
}

int reset_assert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_assert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_deassert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_deassert ? ops->rst_deassert(reset_ctl) : 0;
}

int reset_deassert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_deassert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_status(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_status ? ops->rst_status(reset_ctl) : 0;
}

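/* Assert and then free each previously requested control in the array. */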
int reset_release_all(struct reset_ctl *reset_ctl, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(reset_ctl[%d]=%p)\n", __func__, i, &reset_ctl[i]);

		/* check if reset has been previously requested */
		if (!reset_ctl[i].dev)
			continue;

		ret = reset_assert(&reset_ctl[i]);
		if (ret)
			return ret;

		ret = reset_free(&reset_ctl[i]);
		if (ret)
			return ret;
	}

	return 0;
}

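/* devres release callback: free a single devm-managed reset control */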
static void devm_reset_release(struct udevice *dev, void *res)
{
	reset_free(res);
}

struct reset_ctl *devm_reset_control_get_by_index(struct udevice *dev,
						  int index)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_index(dev, index, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get(struct udevice *dev, const char *id)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_name(dev, id, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get_optional(struct udevice *dev,
						  const char *id)
{
	struct reset_ctl *r = devm_reset_control_get(dev, id);

	if (IS_ERR(r))
		return NULL;

	return r;
}

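/* devres release callback: release every control in a devm-managed bulk */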
static void devm_reset_bulk_release(struct udevice *dev, void *res)
{
	struct reset_ctl_bulk *bulk = res;

	reset_release_all(bulk->resets, bulk->count);
}

struct reset_ctl_bulk *devm_reset_bulk_get_by_node(struct udevice *dev,
						   ofnode node)
{
	int rc;
	struct reset_ctl_bulk *bulk;

	bulk = devres_alloc(devm_reset_bulk_release,
			    sizeof(struct reset_ctl_bulk),
			    __GFP_ZERO);

	/* this looks like a leak, but devres takes care of it */
	if (unlikely(!bulk))
		return ERR_PTR(-ENOMEM);

	rc = __reset_get_bulk(dev, node, bulk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, bulk);
	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional_by_node(struct udevice *dev,
							     ofnode node)
{
	struct reset_ctl_bulk *bulk;

	bulk = devm_reset_bulk_get_by_node(dev, node);

	if (IS_ERR(bulk))
		return NULL;

	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get(struct udevice *dev)
{
	return devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional(struct udevice *dev)
{
	return devm_reset_bulk_get_optional_by_node(dev, dev_ofnode(dev));
}

UCLASS_DRIVER(reset) = {
	.id		= UCLASS_RESET,
	.name		= "reset",
};