// SPDX-License-Identifier: GPL-2.0+
/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010-2018 Texas Instruments Incorporated - http://www.ti.com/
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <cpsw.h>
#include <linux/errno.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <dm.h>
#include <fdt_support.h>

#include "cpsw_mdio.h"

DECLARE_GLOBAL_DATA_PTR;

#define BITMASK(bits)		(BIT(bits) - 1)
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* reg offset */
#define CPSW_HOST_PORT_OFFSET	0x108
#define CPSW_SLAVE0_OFFSET	0x208
#define CPSW_SLAVE1_OFFSET	0x308
#define CPSW_SLAVE_SIZE		0x100
#define CPSW_CPDMA_OFFSET	0x800
#define CPSW_HW_STATS		0x900
#define CPSW_STATERAM_OFFSET	0xa00
#define CPSW_CPTS_OFFSET	0xc00
#define CPSW_ALE_OFFSET		0xd00
#define CPSW_SLIVER0_OFFSET	0xd80
#define CPSW_SLIVER1_OFFSET	0xdc0
#define CPSW_BD_OFFSET		0x2000
#define CPSW_MDIO_DIV		0xff

#define AM335X_GMII_SEL_OFFSET	0x630

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * This timeout definition is a worst-case, ultra-defensive measure against
 * unexpected controller lockups. Ideally, we should never hit this scenario
 * in practice.
 */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
#ifdef CONFIG_AM33XX
	u32	gap_thresh;
#elif defined(CONFIG_TI814X)
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
#endif
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE		0
#define ALE_TYPE_ADDR		1
#define ALE_TYPE_VLAN		2
#define ALE_TYPE_VLAN_ADDR	3

#define ALE_UCAST_PERSISTANT	0
#define ALE_UCAST_UNTOUCHED	1
#define ALE_UCAST_OUI		2
#define ALE_UCAST_TOUCHED	3

#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
};

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;
};

/* AM33xx SoC specific definitions for the CONTROL port */
#define AM33XX_GMII_SEL_MODE_MII	0
#define AM33XX_GMII_SEL_MODE_RMII	1
#define AM33XX_GMII_SEL_MODE_RGMII	2

#define AM33XX_GMII_SEL_RGMII1_IDMODE		BIT(4)
#define AM33XX_GMII_SEL_RGMII2_IDMODE		BIT(5)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN		BIT(6)
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN		BIT(7)

#define GMII_SEL_MODE_MASK		0x3

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

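/*
 * U-Boot only operates one switch port at a time: for_active_slave expands to
 * an assignment plus an "if", so the statement that follows it runs once for
 * the slave selected by data.active_slave, while for_each_slave is a real
 * loop over all slaves and is used for one-time setup such as register
 * mapping in cpsw_slave_setup().
 */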
#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)

struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice			*dev;
#else
	struct eth_device		*dev;
#endif
	struct cpsw_platform_data	data;
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				phy_mask;
};

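/*
 * ALE table entries are ALE_ENTRY_BITS (68) bits wide and are shuttled to and
 * from the hardware as ALE_ENTRY_WORDS 32-bit words. The helpers below address
 * a field by its absolute bit offset within the entry; the "flip" of the word
 * index accounts for ale_entry[] holding the words in the opposite order to
 * the bit numbering used by those offsets.
 */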
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

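/*
 * The slave sa_hi/sa_lo registers take the MAC address as two little-endian
 * words: bytes 0-3 of the address go into sa_hi and bytes 4-5 into sa_lo (see
 * cpsw_set_slave_mac() below).
 */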
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}

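/*
 * Start the PHY and fold the negotiated link parameters into the slave's
 * mac_control value: GIGABITEN for 1000 Mbit/s, MIIEN for 100 Mbit/s and
 * FULLDUPLEXEN for full duplex. The sliver register is only rewritten when
 * the value actually changes.
 */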
static int cpsw_slave_update_link(struct cpsw_slave *slave,
				  struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;
	int ret = -ENODEV;

	phy = priv->phydev;
	if (!phy)
		goto out;

	ret = phy_startup(phy);
	if (ret)
		goto out;

	if (link)
		*link = phy->link;

	if (phy->link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;

out:
	return ret;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int ret = -ENODEV;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		ret = cpsw_slave_update_link(slave, priv, NULL);

	return ret;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}

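/*
 * Free descriptors are kept on a singly-linked list threaded through the
 * hw_next field; this is safe because a descriptor only sits on the free list
 * while it is not owned by the DMA engine.
 */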
static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}

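/*
 * Queue a buffer on a CPDMA channel. The first descriptor is written straight
 * to the channel head descriptor pointer (hdp); later descriptors are chained
 * onto the tail, and hdp is rewritten only if the hardware already signalled
 * end-of-queue (EOQ) on the previous tail, which would otherwise leave the
 * new packet stranded.
 */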
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

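/*
 * Retire the descriptor at the head of a channel and hand back its buffer and
 * length. -EBUSY means the hardware still owns the head descriptor; in that
 * case, if the channel's hdp has gone idle, the descriptor is written back to
 * hdp so a packet that raced with EOQ is not lost.
 */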
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

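/*
 * Full controller bring-up: soft-reset the switch, program the ALE and host
 * port, initialise the active slave and its PHY, set up the CPDMA descriptor
 * pool and channels, and pre-post one RX descriptor per receive buffer.
 */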
static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on the host port and both slave ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	ret = cpsw_update_link(priv);
	if (ret)
		goto out;

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
					* i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
					* i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

out:
	return ret;
}

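/*
 * Drain completed TX descriptors, bounded by CPDMA_TIMEOUT iterations.
 * Returns the remaining iteration budget, or -1 if the budget was exhausted;
 * _cpsw_send() uses the -1 case to detect a stuck TX queue.
 */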
static int cpsw_reap_completed_packets(struct cpsw_priv *priv)
{
	int timeout = CPDMA_TIMEOUT;

	/* reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, NULL, NULL) >= 0))
		;

	return timeout;
}

static void _cpsw_halt(struct cpsw_priv *priv)
{
	cpsw_reap_completed_packets(priv);

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
}

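/*
 * Transmit one packet: flush the buffer from the D-cache so the DMA engine
 * sees current data, reclaim any completed TX descriptors, then queue the
 * buffer on the TX channel.
 */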
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	int timeout;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	timeout = cpsw_reap_completed_packets(priv);
	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
{
	void *buffer;
	int len;
	int ret;

	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
	if (ret < 0)
		return ret;

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + PKTSIZE_ALIGN);
	*pkt = buffer;

	return len;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);

	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

#ifdef CONFIG_DM_ETH
	if (slave->data->phy_of_handle)
		phydev->node = offset_to_ofnode(slave->data->phy_of_handle);
#endif

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

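/*
 * Called from _cpsw_register() once the MDIO bus is up: consult the MDIO
 * ALIVE mask via cpsw_mdio_get_alive() and, when it reports exactly one PHY
 * at a different address, override the active slave's configured phy_addr.
 */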
static void cpsw_phy_addr_update(struct cpsw_priv *priv)
{
	struct cpsw_platform_data *data = &priv->data;
	u16 alive = cpsw_mdio_get_alive(priv->bus);
	int active = data->active_slave;
	int new_addr = ffs(alive) - 1;

	/*
	 * If there is only one phy alive and its address does not match
	 * that of active slave, then phy address can safely be updated.
	 */
	if (hweight16(alive) == 1 &&
	    data->slave_data[active].phy_addr != new_addr) {
		printf("Updated phy address for CPSW#%d, old: %d, new: %d\n",
		       active, data->slave_data[active].phy_addr, new_addr);
		data->slave_data[active].phy_addr = new_addr;
	}
}

int _cpsw_register(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_platform_data *data = &priv->data;
	void *regs = (void *)data->cpsw_base;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves)
		return -ENOMEM;

	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;
	priv->descs = (void *)regs + data->bd_ram_ofs;

	int idx = 0;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	priv->bus = cpsw_mdio_init(priv->dev->name, data->mdio_base, 0, 0);
	if (!priv->bus)
		return -EFAULT;

	cpsw_phy_addr_update(priv);

	for_active_slave(slave, priv)
		cpsw_phy_init(priv, slave);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);

	if (len > 0) {
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}

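/*
 * Legacy (non-DM) entry point: board code fills in a cpsw_platform_data and
 * calls cpsw_register(), typically from board_eth_init(). A minimal sketch,
 * assuming an AM335x-style register layout; the base addresses, PHY address
 * and field values below are made up purely for illustration:
 *
 *	static struct cpsw_slave_data cpsw_slave_data_ex[] = {
 *		{
 *			.slave_reg_ofs	= CPSW_SLAVE0_OFFSET,
 *			.sliver_reg_ofs	= CPSW_SLIVER0_OFFSET,
 *			.phy_addr	= 0,			(hypothetical)
 *			.phy_if		= PHY_INTERFACE_MODE_RGMII,
 *		},
 *	};
 *
 *	static struct cpsw_platform_data cpsw_data_ex = {
 *		.mdio_base	= 0x4a101000,			(hypothetical)
 *		.cpsw_base	= 0x4a100000,			(hypothetical)
 *		.mdio_div	= CPSW_MDIO_DIV,
 *		.channels	= 8,				(hypothetical)
 *		.cpdma_reg_ofs	= CPSW_CPDMA_OFFSET,
 *		.slaves		= 1,
 *		.slave_data	= cpsw_slave_data_ex,
 *		.active_slave	= 0,
 *		.ale_reg_ofs	= CPSW_ALE_OFFSET,
 *		.ale_entries	= 1024,				(hypothetical)
 *		.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET,
 *		.bd_ram_ofs	= CPSW_BD_OFFSET,
 *		.mac_control	= (1 << 5),			(hypothetical)
 *		.host_port_num	= 0,
 *		.version	= CPSW_CTRL_VERSION_2,
 *	};
 *
 *	cpsw_register(&cpsw_data_ex);
 */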
int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct eth_device *dev;
	int ret;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->dev = dev;
	priv->data = *data;

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	ret = _cpsw_register(priv);
	if (ret < 0) {
		eth_unregister(dev);
		free(dev);
		free(priv);
		return ret;
	}

	return 1;
}
#else
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
			     int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}

static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);
}

static const struct eth_ops cpsw_eth_ops = {
	.start		= cpsw_eth_start,
	.send		= cpsw_eth_send,
	.recv		= cpsw_eth_recv,
	.free_pkt	= cpsw_eth_free_pkt,
	.stop		= cpsw_eth_stop,
};

static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
{
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
						  false);
}

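/*
 * On AM335x-class SoCs the gmii_sel register in the control module carries a
 * 2-bit interface mode field per slave (bits 1:0 for slave 0, bits 3:2 for
 * slave 1), RGMII delay-mode (IDMODE) controls in bits 4/5 and RMII I/O clock
 * enables in bits 6/7 - matching the AM33XX_GMII_SEL_* definitions above.
 */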
static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	bool rgmii_id = false;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	}

	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
	mode <<= slave * 2;

	if (priv->data.rmii_clock_external) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
		else
			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
	}

	if (rgmii_id) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
		else
			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
	}

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	}

	switch (slave) {
	case 0:
		mask = GMII_SEL_MODE_MASK;
		break;
	case 1:
		mask = GMII_SEL_MODE_MASK << 4;
		mode <<= 4;
		break;
	default:
		dev_err(priv->dev, "invalid slave number...\n");
		return;
	}

	if (priv->data.rmii_clock_external)
		dev_err(priv->dev, "RMII External clock is not supported\n");

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
			 phy_interface_t phy_mode)
{
	if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	else if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
		cpsw_gmii_sel_dra7xx(priv, phy_mode);
}

static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);

	priv->dev = dev;
	/* Select phy interface in control module */
	cpsw_phy_sel(priv, priv->data.phy_sel_compat,
		     pdata->phy_interface);

	return _cpsw_register(priv);
}

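/*
 * Parse the legacy TI cpsw binding: the top-level node supplies
 * cpdma_channels, slaves, ale_entries, bd_ram_size, mac_control and
 * active_slave, while "mdio", "slave" and "cpsw-phy-sel" subnodes provide the
 * MDIO register space, the per-port phy-mode and phy-handle (or phy_id), and
 * the gmii_sel register. An illustrative device tree fragment follows; node
 * names and addresses are assumed for the example rather than taken from a
 * specific board:
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,am335x-cpsw", "ti,cpsw";
 *		reg = <0x4a100000 0x800>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		slaves = <2>;
 *		active_slave = <0>;
 *
 *		davinci_mdio: mdio@4a101000 {
 *			reg = <0x4a101000 0x100>;
 *		};
 *
 *		cpsw_emac0: slave@4a100200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *		};
 *
 *		phy_sel: cpsw-phy-sel@44e10650 {
 *			compatible = "ti,am3352-cpsw-phy-sel";
 *			reg = <0x44e10650 0x4>;
 *		};
 *	};
 */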
static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct gpio_desc *mode_gpios;
	const char *phy_mode;
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	int subnode;
	int slave_index = 0;
	int active_slave;
	int num_mode_gpios;
	int ret;

	pdata->iobase = devfdt_get_addr(dev);
	priv->data.version = CPSW_CTRL_VERSION_2;
	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
	priv->data.mdio_div = CPSW_MDIO_DIV;
	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;

	pdata->phy_interface = -1;

	priv->data.cpsw_base = pdata->iobase;
	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
	if (priv->data.channels <= 0) {
		printf("error: cpdma_channels not found in dt\n");
		return -ENOENT;
	}

	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
	if (priv->data.slaves <= 0) {
		printf("error: slaves not found in dt\n");
		return -ENOENT;
	}
	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
				       priv->data.slaves);

	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
	if (priv->data.ale_entries <= 0) {
		printf("error: ale_entries not found in dt\n");
		return -ENOENT;
	}

	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
	if (priv->data.bd_ram_ofs <= 0) {
		printf("error: bd_ram_size not found in dt\n");
		return -ENOENT;
	}

	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
	if (priv->data.mac_control <= 0) {
		printf("error: mac_control not found in dt\n");
		return -ENOENT;
	}

	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
	if (num_mode_gpios > 0) {
		mode_gpios = malloc(sizeof(struct gpio_desc) *
				    num_mode_gpios);
		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
					  num_mode_gpios, GPIOD_IS_OUT);
		free(mode_gpios);
	}

	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
	priv->data.active_slave = active_slave;

	fdt_for_each_subnode(subnode, fdt, node) {
		int len;
		const char *name;

		name = fdt_get_name(fdt, subnode, &len);
		if (!strncmp(name, "mdio", 4)) {
			u32 mdio_base;

			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
			if (mdio_base == FDT_ADDR_T_NONE) {
				pr_err("Not able to get MDIO address space\n");
				return -ENOENT;
			}
			priv->data.mdio_base = mdio_base;
		}

		if (!strncmp(name, "slave", 5)) {
			u32 phy_id[2];

			if (slave_index >= priv->data.slaves)
				continue;
			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
			if (phy_mode)
				priv->data.slave_data[slave_index].phy_if =
					phy_get_interface_by_name(phy_mode);

			priv->data.slave_data[slave_index].phy_of_handle =
				fdtdec_lookup_phandle(fdt, subnode,
						      "phy-handle");

			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
				priv->data.slave_data[slave_index].phy_addr =
					fdtdec_get_int(gd->fdt_blob,
						       priv->data.slave_data[slave_index].phy_of_handle,
						       "reg", -1);
			} else {
				fdtdec_get_int_array(fdt, subnode, "phy_id",
						     phy_id, 2);
				priv->data.slave_data[slave_index].phy_addr =
					phy_id[1];
			}
			slave_index++;
		}

		if (!strncmp(name, "cpsw-phy-sel", 12)) {
			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
								    subnode);

			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
				pr_err("Not able to get gmii_sel reg address\n");
				return -ENOENT;
			}

			if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
					     NULL))
				priv->data.rmii_clock_external = true;

			priv->data.phy_sel_compat = fdt_getprop(fdt, subnode,
								"compatible",
								NULL);
			if (!priv->data.phy_sel_compat) {
				pr_err("Not able to get gmii_sel compatible\n");
				return -ENOENT;
			}
		}
	}

	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

	if (priv->data.slaves == 2) {
		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
	}

	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
	if (ret < 0) {
		pr_err("cpsw read efuse mac failed\n");
		return ret;
	}

	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	return 0;
}

int cpsw_get_slave_phy_addr(struct udevice *dev, int slave)
{
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct cpsw_platform_data *data = &priv->data;

	return data->slave_data[slave].phy_addr;
}

static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
	{ }
};

U_BOOT_DRIVER(eth_cpsw) = {
	.name	= "eth_cpsw",
	.id	= UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe	= cpsw_eth_probe,
	.ops	= &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */