/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#include <dm.h>
#include "pcie_layerscape.h"

DECLARE_GLOBAL_DATA_PTR;

LIST_HEAD(ls_pcie_list);

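/* Accessors for the DBI register space, which is always accessed little-endian here */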
static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
{
	return in_le32(pcie->dbi + offset);
}

static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
		       unsigned int offset)
{
	out_le32(pcie->dbi + offset, value);
}

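/*
 * Accessors for the control register block; its endianness varies per SoC
 * and is selected by the "big-endian" device tree property (see probe).
 */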
static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
{
	if (pcie->big_endian)
		return in_be32(pcie->ctrl + offset);
	else
		return in_le32(pcie->ctrl + offset);
}

static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
			unsigned int offset)
{
	if (pcie->big_endian)
		out_be32(pcie->ctrl + offset, value);
	else
		out_le32(pcie->ctrl + offset, value);
}

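/*
 * Return the current LTSSM state. LS102xA parts expose it in the
 * per-controller PEXMSCPORTSR register; other Layerscape SoCs report it
 * through the PF debug register (PCIE_PF_DBG).
 */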
static int ls_pcie_ltssm(struct ls_pcie *pcie)
{
	u32 state;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
		state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
	} else {
		state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
	}

	return state;
}

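/* The link is considered up once the LTSSM has reached at least the L0 state */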
static int ls_pcie_link_up(struct ls_pcie *pcie)
{
	int ltssm;

	ltssm = ls_pcie_ltssm(pcie);
	if (ltssm < LTSSM_PCIE_L0)
		return 0;

	return 1;
}

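/*
 * Route outbound configuration accesses: window 0 targets the bus directly
 * below the root (CFG0 type), window 1 targets buses further out (CFG1
 * type). Only the target BDF is updated here; the windows themselves are
 * programmed in ls_pcie_setup_atu().
 */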
static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

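/*
 * Program one outbound iATU window through the viewport: CPU addresses in
 * [phys, phys + size - 1] are translated to bus address bus_addr with the
 * given TLP type, then the window is enabled.
 */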
static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

/* Use BAR match mode and MEM type by default */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}

static void ls_pcie_dump_atu(struct ls_pcie *pcie)
{
	int i;

	for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
		dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
			   PCIE_ATU_VIEWPORT);
		debug("iATU%d:\n", i);
		debug("\tLOWER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
		debug("\tUPPER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
		debug("\tLOWER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
		debug("\tUPPER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
		debug("\tLIMIT 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LIMIT));
		debug("\tCR1 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR1));
		debug("\tCR2 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR2));
	}
}

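/*
 * RC-mode iATU layout: windows 0 and 1 split the "config" region between
 * CFG0 and CFG1 accesses, and one further window is set up for each of the
 * IO, MEM and prefetchable regions reported by the PCI uclass. On LS102xA
 * an extra per-controller address offset is applied on the CPU side.
 */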
static void ls_pcie_setup_atu(struct ls_pcie *pcie)
{
	struct pci_region *io, *mem, *pref;
	unsigned long long offset = 0;
	int idx = 0;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		offset = LS1021_PCIE_SPACE_OFFSET +
			 LS1021_PCIE_SPACE_SIZE * pcie->idx;
	}

	/* ATU 0 : OUTBOUND : CFG0 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				 PCIE_ATU_TYPE_CFG0,
				 pcie->cfg_res.start + offset,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);
	/* ATU 1 : OUTBOUND : CFG1 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				 PCIE_ATU_TYPE_CFG1,
				 pcie->cfg_res.start + offset +
				 fdt_resource_size(&pcie->cfg_res) / 2,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);

	pci_get_regions(pcie->bus, &io, &mem, &pref);
	idx = PCIE_ATU_REGION_INDEX1 + 1;

	if (io)
		/* ATU : OUTBOUND : IO */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_IO,
					 io->phys_start + offset,
					 io->bus_start,
					 io->size);

	if (mem)
		/* ATU : OUTBOUND : MEM */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 mem->phys_start + offset,
					 mem->bus_start,
					 mem->size);

	if (pref)
		/* ATU : OUTBOUND : pref */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 pref->phys_start + offset,
					 pref->bus_start,
					 pref->size);

	ls_pcie_dump_atu(pcie);
}

/* Return 0 if the address is valid, -errno if not valid */
static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
{
	struct udevice *bus = pcie->bus;

	if (!pcie->enabled)
		return -ENXIO;

	if (PCI_BUS(bdf) < bus->seq)
		return -EINVAL;

	if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
		return -EINVAL;

	if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
		return -EINVAL;

	return 0;
}

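/*
 * Translate a configuration access into a CPU address: the root bus maps
 * straight onto the DBI space, the first downstream bus goes through the
 * CFG0 window and anything beyond that through the CFG1 window.
 */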
void *ls_pcie_conf_address(struct ls_pcie *pcie, pci_dev_t bdf,
			   int offset)
{
	struct udevice *bus = pcie->bus;
	u32 busdev;

	if (PCI_BUS(bdf) == bus->seq)
		return pcie->dbi + offset;

	busdev = PCIE_ATU_BUS(PCI_BUS(bdf)) |
		 PCIE_ATU_DEV(PCI_DEV(bdf)) |
		 PCIE_ATU_FUNC(PCI_FUNC(bdf));

	if (PCI_BUS(bdf) == bus->seq + 1) {
		ls_pcie_cfg0_set_busdev(pcie, busdev);
		return pcie->cfg0 + offset;
	} else {
		ls_pcie_cfg1_set_busdev(pcie, busdev);
		return pcie->cfg1 + offset;
	}
}

static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
			       uint offset, ulong *valuep,
			       enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *address;

	if (ls_pcie_addr_valid(pcie, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	address = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		return 0;
	case PCI_SIZE_16:
		*valuep = readw(address);
		return 0;
	case PCI_SIZE_32:
		*valuep = readl(address);
		return 0;
	default:
		return -EINVAL;
	}
}

static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				uint offset, ulong value,
				enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *address;

	if (ls_pcie_addr_valid(pcie, bdf))
		return 0;

	address = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
}

/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
	writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}

/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;

	val = dbi_readl(pcie, PCIE_STRFMR1);
	val &= 0xDFFFFFFF;
	dbi_writel(pcie, val, PCIE_STRFMR1);
}

/* Disable all bars in RC mode */
static void ls_pcie_disable_bars(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = in_le32(pcie->dbi + PCIE_SRIOV);

	/*
	 * TODO: For PCIe controllers with SR-IOV, the method to disable
	 * BARs is different and more complex, so it will be added later.
	 */
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		return;

	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
}

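/*
 * RC-mode controller setup: program the ATU windows, then briefly unlock
 * the read-only DBI registers to fix the class code, clear the
 * multi-function bit and drop unwanted message TLPs, and finally disable
 * the RC BARs.
 */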
static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}

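/*
 * EP-mode iATU layout: inbound windows map BAR0, BAR1, BAR2 and BAR4 into
 * the region starting at CONFIG_SYS_PCI_EP_MEMORY_BASE, and a single
 * outbound MEM window covers host memory.
 */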
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}

/* BAR0 and BAR1 are 32-bit; BAR2 and BAR4 are 64-bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	/* The smallest inbound window is 4 KiB */
	if (size < 4 * 1024)
		return;

	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32-bit - 4K configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32-bit - 8K MSI-X */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64-bit - 4K MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64-bit - 1M MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

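/*
 * EP-mode controller setup: with SR-IOV, BARs and ATU windows are
 * programmed once per PF/VF pair selected through PCIE_PF_VF_CTRL;
 * without SR-IOV they are programmed once through the registers at
 * PCIE_NO_SRIOV_BAR_BASE.
 */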
static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}
}

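/*
 * Driver-model probe: map the "dbi", "lut"/"ctrl" and "config" regions from
 * the device tree, detect whether the controller is strapped as Endpoint or
 * Root Complex from its header type, configure it accordingly and report
 * the resulting link state.
 */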
static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev->of_offset;
	u8 header_type;
	u16 link_sta;
	bool ep_mode;
	int ret;

	pcie->bus = dev;

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: control registers not found\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

	header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s %s", pcie->idx, dev->name,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie);
	else
		ls_pcie_setup_ctrl(pcie);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}

static const struct dm_pci_ops ls_pcie_ops = {
	.read_config = ls_pcie_read_config,
	.write_config = ls_pcie_write_config,
};

static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};

U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};