blob: 629ef3aa3de57fda8fcfda818fe059c728e34577 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +02002/*
3 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
4 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +02005 */
6
Patrick Delaunayb953ec22021-04-27 11:02:19 +02007#define LOG_CATEGORY UCLASS_PHY
8
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +02009#include <common.h>
10#include <dm.h>
Sean Andersonbdc1fdf2020-10-04 21:39:47 -040011#include <dm/device_compat.h>
Chunfeng Yunb13307b2020-05-02 11:35:11 +020012#include <dm/devres.h>
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +020013#include <generic-phy.h>
Alper Nebi Yasak226fce62021-12-30 22:36:51 +030014#include <linux/list.h>
Eugen Hristevc57e0dc2023-05-15 12:59:47 +030015#include <power/regulator.h>
Alper Nebi Yasak226fce62021-12-30 22:36:51 +030016
/**
 * struct phy_counts - Init and power-on counts of a single PHY port
 *
 * This structure is used to keep track of PHY initialization and power
 * state change requests, so that we don't power off and deinitialize a
 * PHY instance until all of its users want it done. Otherwise, multiple
 * consumers using the same PHY port can cause problems (e.g. one might
 * call power_off() after another's exit() and hang indefinitely).
 *
 * @id: The PHY ID within a PHY provider
 * @power_on_count: Times generic_phy_power_on() was called for this ID
 *                  without a matching generic_phy_power_off() afterwards
 * @init_count: Times generic_phy_init() was called for this ID
 *              without a matching generic_phy_exit() afterwards
 * @list: Handle for a linked list of these structures corresponding to
 *        ports of the same PHY provider (list head lives in the
 *        provider's uclass-private data)
 * @supply: Handle to a phy-supply device, or NULL if none was found
 */
struct phy_counts {
	unsigned long id;
	int power_on_count;
	int init_count;
	struct list_head list;
	struct udevice *supply;
};
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +020042
/* Return the struct phy_ops implemented by @dev's driver */
static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
{
	return (struct phy_ops *)dev->driver->ops;
}
47
Alper Nebi Yasak226fce62021-12-30 22:36:51 +030048static struct phy_counts *phy_get_counts(struct phy *phy)
49{
50 struct list_head *uc_priv;
51 struct phy_counts *counts;
52
53 if (!generic_phy_valid(phy))
54 return NULL;
55
56 uc_priv = dev_get_uclass_priv(phy->dev);
57 list_for_each_entry(counts, uc_priv, list)
58 if (counts->id == phy->id)
59 return counts;
60
61 return NULL;
62}
63
Eugen Hristevc57e0dc2023-05-15 12:59:47 +030064static int phy_alloc_counts(struct phy *phy, struct udevice *supply)
Alper Nebi Yasak226fce62021-12-30 22:36:51 +030065{
66 struct list_head *uc_priv;
67 struct phy_counts *counts;
68
69 if (!generic_phy_valid(phy))
70 return 0;
71 if (phy_get_counts(phy))
72 return 0;
73
74 uc_priv = dev_get_uclass_priv(phy->dev);
75 counts = kzalloc(sizeof(*counts), GFP_KERNEL);
76 if (!counts)
77 return -ENOMEM;
78
79 counts->id = phy->id;
80 counts->power_on_count = 0;
81 counts->init_count = 0;
Eugen Hristevc57e0dc2023-05-15 12:59:47 +030082 counts->supply = supply;
Alper Nebi Yasak226fce62021-12-30 22:36:51 +030083 list_add(&counts->list, uc_priv);
84
85 return 0;
86}
87
/* Initialize the provider's per-device list of phy_counts entries */
static int phy_uclass_pre_probe(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);

	INIT_LIST_HEAD(uc_priv);

	return 0;
}
96
97static int phy_uclass_pre_remove(struct udevice *dev)
98{
99 struct list_head *uc_priv = dev_get_uclass_priv(dev);
100 struct phy_counts *counts, *next;
101
102 list_for_each_entry_safe(counts, next, uc_priv, list)
103 kfree(counts);
104
105 return 0;
106}
107
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200108static int generic_phy_xlate_offs_flags(struct phy *phy,
Simon Glass23558bb2017-05-18 20:09:47 -0600109 struct ofnode_phandle_args *args)
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200110{
111 debug("%s(phy=%p)\n", __func__, phy);
112
113 if (args->args_count > 1) {
Sean Anderson46ad7ce2021-12-01 14:26:53 -0500114 debug("Invalid args_count: %d\n", args->args_count);
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200115 return -EINVAL;
116 }
117
118 if (args->args_count)
119 phy->id = args->args[0];
120 else
121 phy->id = 0;
122
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200123 return 0;
124}
125
/*
 * Resolve entry @index of @node's "phys" property into @phy.
 *
 * Finds the provider device, translates the phandle args into a port id
 * (driver of_xlate or the default), looks up an optional "phy-supply"
 * regulator on the provider, and registers a ref-count entry for the
 * port. On any failure after parsing, returns a negative errno; on
 * success returns 0 with phy->dev / phy->id filled in.
 */
int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
{
	struct ofnode_phandle_args args;
	struct phy_ops *ops;
	struct udevice *phydev, *supply = NULL;
	int i, ret;

	debug("%s(node=%s, index=%d, phy=%p)\n",
	      __func__, ofnode_get_name(node), index, phy);

	assert(phy);
	phy->dev = NULL;
	ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
					     index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);

		/*
		 * Check if args.node's parent is a PHY provider: some
		 * bindings point "phys" at a child port node of the
		 * provider rather than at the provider itself.
		 */
		ret = uclass_get_device_by_ofnode(UCLASS_PHY,
						  ofnode_get_parent(args.node),
						  &phydev);
		if (ret)
			return ret;

		/*
		 * insert phy idx at first position into args array, so the
		 * child node's "reg" value acts as the port id cell
		 */
		for (i = args.args_count; i >= 1 ; i--)
			args.args[i] = args.args[i - 1];

		args.args_count++;
		args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
	}

	phy->dev = phydev;

	ops = phy_dev_ops(phydev);

	if (ops->of_xlate)
		ret = ops->of_xlate(phy, &args);
	else
		ret = generic_phy_xlate_offs_flags(phy, &args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		goto err;
	}

	/* -ENOENT (no "phy-supply" property) is not an error */
	if (CONFIG_IS_ENABLED(DM_REGULATOR)) {
		ret = device_get_supply_regulator(phydev, "phy-supply",
						  &supply);
		if (ret && ret != -ENOENT) {
			debug("%s: device_get_supply_regulator failed: %d\n",
			      __func__, ret);
			goto err;
		}
	}

	ret = phy_alloc_counts(phy, supply);
	if (ret) {
		debug("phy_alloc_counts() failed: %d\n", ret);
		goto err;
	}

	return 0;

err:
	return ret;
}
200
/* Resolve entry @index of @dev's "phys" DT property into @phy */
int generic_phy_get_by_index(struct udevice *dev, int index,
			     struct phy *phy)
{
	return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
}
206
/* Resolve the "phys" entry whose "phy-names" string matches @phy_name */
int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
			    struct phy *phy)
{
	int idx;

	debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);

	idx = dev_read_stringlist_search(dev, "phy-names", phy_name);
	if (idx >= 0)
		return generic_phy_get_by_index(dev, idx, phy);

	debug("dev_read_stringlist_search() failed: %d\n", idx);
	return idx;
}
222
223int generic_phy_init(struct phy *phy)
224{
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300225 struct phy_counts *counts;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200226 struct phy_ops const *ops;
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200227 int ret;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200228
Vignesh Raghavendra64b69f82020-05-20 22:35:41 +0530229 if (!generic_phy_valid(phy))
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200230 return 0;
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300231 counts = phy_get_counts(phy);
232 if (counts->init_count > 0) {
233 counts->init_count++;
234 return 0;
235 }
236
Jonas Karlmane74ac442023-05-15 12:59:50 +0300237 ops = phy_dev_ops(phy->dev);
238 if (ops->init) {
239 ret = ops->init(phy);
240 if (ret) {
241 dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
242 phy->dev->name, ret);
243 return ret;
244 }
245 }
246 counts->init_count = 1;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200247
Jonas Karlmane74ac442023-05-15 12:59:50 +0300248 return 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200249}
250
251int generic_phy_reset(struct phy *phy)
252{
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200253 struct phy_ops const *ops;
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200254 int ret;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200255
Vignesh Raghavendra64b69f82020-05-20 22:35:41 +0530256 if (!generic_phy_valid(phy))
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200257 return 0;
258 ops = phy_dev_ops(phy->dev);
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200259 if (!ops->reset)
260 return 0;
261 ret = ops->reset(phy);
262 if (ret)
263 dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
264 phy->dev->name, ret);
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200265
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200266 return ret;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200267}
268
269int generic_phy_exit(struct phy *phy)
270{
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300271 struct phy_counts *counts;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200272 struct phy_ops const *ops;
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200273 int ret;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200274
Vignesh Raghavendra64b69f82020-05-20 22:35:41 +0530275 if (!generic_phy_valid(phy))
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200276 return 0;
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300277 counts = phy_get_counts(phy);
278 if (counts->init_count == 0)
279 return 0;
280 if (counts->init_count > 1) {
281 counts->init_count--;
282 return 0;
283 }
284
Jonas Karlmane74ac442023-05-15 12:59:50 +0300285 ops = phy_dev_ops(phy->dev);
286 if (ops->exit) {
287 ret = ops->exit(phy);
288 if (ret) {
289 dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
290 phy->dev->name, ret);
291 return ret;
292 }
293 }
294 counts->init_count = 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200295
Jonas Karlmane74ac442023-05-15 12:59:50 +0300296 return 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200297}
298
299int generic_phy_power_on(struct phy *phy)
300{
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300301 struct phy_counts *counts;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200302 struct phy_ops const *ops;
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200303 int ret;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200304
Vignesh Raghavendra64b69f82020-05-20 22:35:41 +0530305 if (!generic_phy_valid(phy))
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200306 return 0;
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300307 counts = phy_get_counts(phy);
308 if (counts->power_on_count > 0) {
309 counts->power_on_count++;
310 return 0;
311 }
312
Eugen Hristevc57e0dc2023-05-15 12:59:47 +0300313 ret = regulator_set_enable_if_allowed(counts->supply, true);
314 if (ret && ret != -ENOSYS) {
315 dev_err(phy->dev, "PHY: Failed to enable regulator %s: %d.\n",
316 counts->supply->name, ret);
317 return ret;
318 }
319
Jonas Karlmane74ac442023-05-15 12:59:50 +0300320 ops = phy_dev_ops(phy->dev);
321 if (ops->power_on) {
322 ret = ops->power_on(phy);
323 if (ret) {
324 dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
325 phy->dev->name, ret);
326 regulator_set_enable_if_allowed(counts->supply, false);
327 return ret;
328 }
Eugen Hristevc57e0dc2023-05-15 12:59:47 +0300329 }
330 counts->power_on_count = 1;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200331
Eugen Hristevc57e0dc2023-05-15 12:59:47 +0300332 return 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200333}
334
335int generic_phy_power_off(struct phy *phy)
336{
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300337 struct phy_counts *counts;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200338 struct phy_ops const *ops;
Patrick Delaunay2041ae52020-07-03 17:36:40 +0200339 int ret;
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200340
Vignesh Raghavendra64b69f82020-05-20 22:35:41 +0530341 if (!generic_phy_valid(phy))
Jean-Jacques Hiblot4e184292019-10-01 14:03:26 +0200342 return 0;
Alper Nebi Yasak226fce62021-12-30 22:36:51 +0300343 counts = phy_get_counts(phy);
344 if (counts->power_on_count == 0)
345 return 0;
346 if (counts->power_on_count > 1) {
347 counts->power_on_count--;
348 return 0;
349 }
350
Jonas Karlmane74ac442023-05-15 12:59:50 +0300351 ops = phy_dev_ops(phy->dev);
352 if (ops->power_off) {
353 ret = ops->power_off(phy);
354 if (ret) {
355 dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
356 phy->dev->name, ret);
357 return ret;
358 }
Eugen Hristevc57e0dc2023-05-15 12:59:47 +0300359 }
360 counts->power_on_count = 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200361
Eugen Hristevc57e0dc2023-05-15 12:59:47 +0300362 ret = regulator_set_enable_if_allowed(counts->supply, false);
363 if (ret && ret != -ENOSYS)
364 dev_err(phy->dev, "PHY: Failed to disable regulator %s: %d.\n",
365 counts->supply->name, ret);
366
367 return 0;
Jean-Jacques Hiblot72e50162017-04-24 11:51:27 +0200368}
369
Neil Armstrongf8da8a82020-12-29 14:58:59 +0100370int generic_phy_configure(struct phy *phy, void *params)
371{
372 struct phy_ops const *ops;
373
374 if (!generic_phy_valid(phy))
375 return 0;
376 ops = phy_dev_ops(phy->dev);
377
378 return ops->configure ? ops->configure(phy, params) : 0;
379}
380
Marek Vasutb0177a22023-03-19 18:09:42 +0100381int generic_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
382{
383 struct phy_ops const *ops;
384
385 if (!generic_phy_valid(phy))
386 return 0;
387 ops = phy_dev_ops(phy->dev);
388
389 return ops->set_mode ? ops->set_mode(phy, mode, submode) : 0;
390}
391
392int generic_phy_set_speed(struct phy *phy, int speed)
393{
394 struct phy_ops const *ops;
395
396 if (!generic_phy_valid(phy))
397 return 0;
398 ops = phy_dev_ops(phy->dev);
399
400 return ops->set_speed ? ops->set_speed(phy, speed) : 0;
401}
402
/*
 * Resolve every entry of the "phys" property into @bulk.
 *
 * If @dev itself has no "phys" property, its parent is tried instead
 * (the allocation and lookups then use the parent). Returns 0 when no
 * "phys" property exists at all, a negative errno on lookup/allocation
 * failure, and fills bulk->count with the number of PHYs obtained.
 */
int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
{
	int i, ret, count;
	struct udevice *phydev = dev;

	bulk->count = 0;

	/* Return if no phy declared on @dev or its parent */
	if (!dev_read_prop(dev, "phys", NULL)) {
		phydev = dev->parent;
		if (!dev_read_prop(phydev, "phys", NULL)) {
			pr_err("%s : no phys property\n", __func__);
			return 0;
		}
	}

	count = dev_count_phandle_with_args(phydev, "phys", "#phy-cells", 0);
	if (count < 1) {
		pr_err("%s : no phys found %d\n", __func__, count);
		return count;
	}

	/* devm allocation: freed automatically when phydev is removed */
	bulk->phys = devm_kcalloc(phydev, count, sizeof(struct phy), GFP_KERNEL);
	if (!bulk->phys)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = generic_phy_get_by_index(phydev, i, &bulk->phys[i]);
		if (ret) {
			pr_err("Failed to get PHY%d for %s\n", i, dev->name);
			return ret;
		}
		bulk->count++;
	}

	return 0;
}
440
441int generic_phy_init_bulk(struct phy_bulk *bulk)
442{
443 struct phy *phys = bulk->phys;
444 int i, ret;
445
446 for (i = 0; i < bulk->count; i++) {
447 ret = generic_phy_init(&phys[i]);
448 if (ret) {
449 pr_err("Can't init PHY%d\n", i);
450 goto phys_init_err;
451 }
452 }
453
454 return 0;
455
456phys_init_err:
457 for (; i > 0; i--)
458 generic_phy_exit(&phys[i - 1]);
459
460 return ret;
461}
462
463int generic_phy_exit_bulk(struct phy_bulk *bulk)
464{
465 struct phy *phys = bulk->phys;
466 int i, ret = 0;
467
468 for (i = 0; i < bulk->count; i++)
469 ret |= generic_phy_exit(&phys[i]);
470
471 return ret;
472}
473
474int generic_phy_power_on_bulk(struct phy_bulk *bulk)
475{
476 struct phy *phys = bulk->phys;
477 int i, ret;
478
479 for (i = 0; i < bulk->count; i++) {
480 ret = generic_phy_power_on(&phys[i]);
481 if (ret) {
482 pr_err("Can't power on PHY%d\n", i);
483 goto phys_poweron_err;
484 }
485 }
486
487 return 0;
488
489phys_poweron_err:
490 for (; i > 0; i--)
491 generic_phy_power_off(&phys[i - 1]);
492
493 return ret;
494}
495
496int generic_phy_power_off_bulk(struct phy_bulk *bulk)
497{
498 struct phy *phys = bulk->phys;
499 int i, ret = 0;
500
501 for (i = 0; i < bulk->count; i++)
502 ret |= generic_phy_power_off(&phys[i]);
503
504 return ret;
505}
506
Patrice Chotard84e56142022-09-06 08:15:26 +0200507int generic_setup_phy(struct udevice *dev, struct phy *phy, int index)
508{
509 int ret = 0;
510
511 if (!phy)
512 return 0;
513
514 ret = generic_phy_get_by_index(dev, index, phy);
515 if (ret) {
516 if (ret != -ENOENT)
517 return ret;
518 } else {
519 ret = generic_phy_init(phy);
520 if (ret)
521 return ret;
522
523 ret = generic_phy_power_on(phy);
524 if (ret)
525 ret = generic_phy_exit(phy);
526 }
527
528 return ret;
529}
530
/*
 * Power off and exit @phy; a NULL or invalid phy is a no-op. Stops at
 * the first failure and returns its error code.
 */
int generic_shutdown_phy(struct phy *phy)
{
	int ret;

	if (!phy || !generic_phy_valid(phy))
		return 0;

	ret = generic_phy_power_off(phy);
	if (ret)
		return ret;

	return generic_phy_exit(phy);
}
548
/* PHY uclass: per-provider private data holds the phy_counts list head */
UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
	.pre_probe	= phy_uclass_pre_probe,
	.pre_remove	= phy_uclass_pre_remove,
	.per_device_auto	= sizeof(struct list_head),
};