// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#define LOG_CATEGORY UCLASS_RESET

#include <common.h>
#include <dm.h>
#include <fdtdec.h>
#include <log.h>
#include <malloc.h>
#include <reset.h>
#include <reset-uclass.h>
#include <dm/devres.h>
#include <dm/lists.h>

static inline struct reset_ops *reset_dev_ops(struct udevice *dev)
{
	return (struct reset_ops *)dev->driver->ops;
}

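/*
 * Default specifier translation, used when a reset controller driver does
 * not provide its own of_xlate operation: it expects a single-cell specifier
 * and uses that cell directly as the reset ID.
 */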
static int reset_of_xlate_default(struct reset_ctl *reset_ctl,
				  struct ofnode_phandle_args *args)
{
	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	if (args->args_count != 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	reset_ctl->id = args->args[0];

	return 0;
}

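/*
 * Common tail shared by reset_get_by_index() and reset_get_by_index_nodev():
 * given already-parsed phandle arguments, look up the reset controller
 * device, translate the specifier into @reset_ctl and request the control
 * from the driver (if it implements the request op).
 */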
static int reset_get_by_index_tail(int ret, ofnode node,
				   struct ofnode_phandle_args *args,
				   const char *list_name, int index,
				   struct reset_ctl *reset_ctl)
{
	struct udevice *dev_reset;
	struct reset_ops *ops;

	assert(reset_ctl);
	reset_ctl->dev = NULL;
	if (ret)
		return ret;

	ret = uclass_get_device_by_ofnode(UCLASS_RESET, args->node,
					  &dev_reset);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode() failed: %d\n",
		      __func__, ret);
		debug("%s %d\n", ofnode_get_name(args->node), args->args[0]);
		return ret;
	}
	ops = reset_dev_ops(dev_reset);

	reset_ctl->dev = dev_reset;
	if (ops->of_xlate)
		ret = ops->of_xlate(reset_ctl, args);
	else
		ret = reset_of_xlate_default(reset_ctl, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	ret = ops->request ? ops->request(reset_ctl) : 0;
	if (ret) {
		debug("ops->request() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

int reset_get_by_index(struct udevice *dev, int index,
		       struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "resets", "#reset-cells", 0,
					 index, &args);

	return reset_get_by_index_tail(ret, dev_ofnode(dev), &args, "resets",
				       index > 0, reset_ctl);
}

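/*
 * Illustrative consumer sketch (not part of this uclass; the device and
 * property values are hypothetical). With a node containing
 * "resets = <&rstc 5>;", a driver's probe routine might do:
 *
 *	struct reset_ctl rst;
 *	int ret;
 *
 *	ret = reset_get_by_index(dev, 0, &rst);
 *	if (ret)
 *		return ret;
 *
 *	ret = reset_deassert(&rst);
 *	if (ret)
 *		return ret;
 */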
int reset_get_by_index_nodev(ofnode node, int index,
			     struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "resets", "#reset-cells", 0,
					     index, &args);

	return reset_get_by_index_tail(ret, node, &args, "resets",
				       index > 0, reset_ctl);
}

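/*
 * Helper backing reset_get_bulk() and devm_reset_bulk_get_by_node(): count
 * the "resets" phandles on @node, allocate the control array with devres,
 * fetch each control in turn and release everything obtained so far if any
 * lookup fails.
 */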
static int __reset_get_bulk(struct udevice *dev, ofnode node,
			    struct reset_ctl_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = ofnode_count_phandle_with_args(node, "resets", "#reset-cells",
					       0);
	if (count < 1)
		return count;

	bulk->resets = devm_kcalloc(dev, count, sizeof(struct reset_ctl),
				    GFP_KERNEL);
	if (!bulk->resets)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = reset_get_by_index_nodev(node, i, &bulk->resets[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = reset_release_all(bulk->resets, bulk->count);
	if (err)
		debug("%s: could not release all resets for %p\n",
		      __func__, dev);

	return ret;
}

int reset_get_bulk(struct udevice *dev, struct reset_ctl_bulk *bulk)
{
	return __reset_get_bulk(dev, dev_ofnode(dev), bulk);
}

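/*
 * Illustrative bulk usage sketch (hypothetical consumer code): fetch every
 * reset listed in the device's "resets" property and de-assert them all,
 * releasing the controls again if de-assertion fails.
 *
 *	struct reset_ctl_bulk resets;
 *	int ret;
 *
 *	ret = reset_get_bulk(dev, &resets);
 *	if (ret)
 *		return ret;
 *
 *	ret = reset_deassert_bulk(&resets);
 *	if (ret)
 *		reset_release_all(resets.resets, resets.count);
 */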
int reset_get_by_name(struct udevice *dev, const char *name,
		      struct reset_ctl *reset_ctl)
{
	int index;

	debug("%s(dev=%p, name=%s, reset_ctl=%p)\n", __func__, dev, name,
	      reset_ctl);
	reset_ctl->dev = NULL;

	index = dev_read_stringlist_search(dev, "reset-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return reset_get_by_index(dev, index, reset_ctl);
}

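/*
 * Illustrative by-name lookup (hypothetical bindings): with
 *
 *	resets = <&rstc 10>, <&rstc 11>;
 *	reset-names = "ahb", "apb";
 *
 * in the consumer's node, the "apb" line can be requested with:
 *
 *	ret = reset_get_by_name(dev, "apb", &rst);
 */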
int reset_request(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->request ? ops->request(reset_ctl) : 0;
}

int reset_free(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rfree ? ops->rfree(reset_ctl) : 0;
}

int reset_assert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_assert ? ops->rst_assert(reset_ctl) : 0;
}

int reset_assert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_assert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_deassert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_deassert ? ops->rst_deassert(reset_ctl) : 0;
}

int reset_deassert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_deassert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_status(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_status ? ops->rst_status(reset_ctl) : 0;
}

int reset_release_all(struct reset_ctl *reset_ctl, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(reset_ctl[%d]=%p)\n", __func__, i, &reset_ctl[i]);

		/* check if reset has been previously requested */
		if (!reset_ctl[i].dev)
			continue;

		ret = reset_assert(&reset_ctl[i]);
		if (ret)
			return ret;

		ret = reset_free(&reset_ctl[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void devm_reset_release(struct udevice *dev, void *res)
{
	reset_free(res);
}

struct reset_ctl *devm_reset_control_get_by_index(struct udevice *dev,
						  int index)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_index(dev, index, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get(struct udevice *dev, const char *id)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_name(dev, id, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get_optional(struct udevice *dev,
						  const char *id)
{
	struct reset_ctl *r = devm_reset_control_get(dev, id);

	if (IS_ERR(r))
		return NULL;

	return r;
}

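/*
 * Illustrative devres usage sketch (hypothetical consumer; the "core" name
 * is an example): the devm_ variants bind the control's lifetime to @dev,
 * so no explicit reset_free() is needed.
 *
 *	struct reset_ctl *rst = devm_reset_control_get(dev, "core");
 *
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *
 *	reset_assert(rst);
 */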
static void devm_reset_bulk_release(struct udevice *dev, void *res)
{
	struct reset_ctl_bulk *bulk = res;

	reset_release_all(bulk->resets, bulk->count);
}

struct reset_ctl_bulk *devm_reset_bulk_get_by_node(struct udevice *dev,
						   ofnode node)
{
	int rc;
	struct reset_ctl_bulk *bulk;

	bulk = devres_alloc(devm_reset_bulk_release,
			    sizeof(struct reset_ctl_bulk),
			    __GFP_ZERO);

	/* this looks like a leak, but devres takes care of it */
	if (unlikely(!bulk))
		return ERR_PTR(-ENOMEM);

	rc = __reset_get_bulk(dev, node, bulk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, bulk);
	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional_by_node(struct udevice *dev,
							    ofnode node)
{
	struct reset_ctl_bulk *bulk;

	bulk = devm_reset_bulk_get_by_node(dev, node);

	if (IS_ERR(bulk))
		return NULL;

	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get(struct udevice *dev)
{
	return devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional(struct udevice *dev)
{
	return devm_reset_bulk_get_optional_by_node(dev, dev_ofnode(dev));
}

UCLASS_DRIVER(reset) = {
	.id		= UCLASS_RESET,
	.name		= "reset",
};