blob: 428bfcab09f297273685fc4c0d859f1a8d8022c0 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+ OR X11
/*
 * Copyright 2018-2019 NXP
 *
 * PCIe Gen4 driver for NXP Layerscape SoCs
 * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
 */
8
9#include <common.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060010#include <log.h>
Hou Zhiqiang07ce19f2019-04-08 10:15:46 +000011#include <asm/arch/fsl_serdes.h>
12#include <pci.h>
13#include <asm/io.h>
14#include <errno.h>
15#include <malloc.h>
16#include <dm.h>
17#include <linux/sizes.h>
18
19#include "pcie_layerscape_gen4.h"
20
DECLARE_GLOBAL_DATA_PTR;

/* Global list linking every probed Gen4 PCIe controller instance */
LIST_HEAD(ls_pcie_g4_list);
24
/*
 * Default sizes for the four endpoint-mode BARs; indexed by BAR number
 * (BAR3 slot holds the BAR4 size per the controller's BAR layout).
 */
static u64 bar_size[4] = {
	PCIE_BAR0_SIZE,
	PCIE_BAR1_SIZE,
	PCIE_BAR2_SIZE,
	PCIE_BAR4_SIZE
};
31
32static int ls_pcie_g4_ltssm(struct ls_pcie_g4 *pcie)
33{
34 u32 state;
35
36 state = pf_ctrl_readl(pcie, PCIE_LTSSM_STA) & LTSSM_STATE_MASK;
37
38 return state;
39}
40
41static int ls_pcie_g4_link_up(struct ls_pcie_g4 *pcie)
42{
43 int ltssm;
44
45 ltssm = ls_pcie_g4_ltssm(pcie);
46 if (ltssm != LTSSM_PCIE_L0)
47 return 0;
48
49 return 1;
50}
51
/* Endpoint mode: signal CONFIG_READY so the host may enumerate us */
static void ls_pcie_g4_ep_enable_cfg(struct ls_pcie_g4 *pcie)
{
	ccsr_writel(pcie, GPEX_CFG_READY, PCIE_CONFIG_READY);
}
56
/*
 * Point outbound window 0 (the CFG window) at the given bus/dev/func
 * target so the next config-space access reaches that device.
 */
static void ls_pcie_g4_cfg_set_target(struct ls_pcie_g4 *pcie, u32 target)
{
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
}
62
63static int ls_pcie_g4_outbound_win_set(struct ls_pcie_g4 *pcie, int idx,
64 int type, u64 phys, u64 bus_addr,
65 pci_size_t size)
66{
67 u32 val;
68 u32 size_h, size_l;
69
70 if (idx >= PAB_WINS_NUM)
71 return -EINVAL;
72
73 size_h = upper_32_bits(~(size - 1));
74 size_l = lower_32_bits(~(size - 1));
75
76 val = ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(idx));
77 val &= ~((AXI_AMAP_CTRL_TYPE_MASK << AXI_AMAP_CTRL_TYPE_SHIFT) |
78 (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT) |
79 AXI_AMAP_CTRL_EN);
80 val |= ((type & AXI_AMAP_CTRL_TYPE_MASK) << AXI_AMAP_CTRL_TYPE_SHIFT) |
81 ((size_l >> AXI_AMAP_CTRL_SIZE_SHIFT) <<
82 AXI_AMAP_CTRL_SIZE_SHIFT) | AXI_AMAP_CTRL_EN;
83
84 ccsr_writel(pcie, PAB_AXI_AMAP_CTRL(idx), val);
85
86 ccsr_writel(pcie, PAB_AXI_AMAP_AXI_WIN(idx), lower_32_bits(phys));
87 ccsr_writel(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(idx), upper_32_bits(phys));
88 ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
89 ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
90 ccsr_writel(pcie, PAB_EXT_AXI_AMAP_SIZE(idx), size_h);
91
92 return 0;
93}
94
95static int ls_pcie_g4_rc_inbound_win_set(struct ls_pcie_g4 *pcie, int idx,
96 int type, u64 phys, u64 bus_addr,
97 pci_size_t size)
98{
99 u32 val;
100 pci_size_t win_size = ~(size - 1);
101
102 val = ccsr_readl(pcie, PAB_PEX_AMAP_CTRL(idx));
103
104 val &= ~(PEX_AMAP_CTRL_TYPE_MASK << PEX_AMAP_CTRL_TYPE_SHIFT);
105 val &= ~(PEX_AMAP_CTRL_EN_MASK << PEX_AMAP_CTRL_EN_SHIFT);
106 val = (val | (type << PEX_AMAP_CTRL_TYPE_SHIFT));
107 val = (val | (1 << PEX_AMAP_CTRL_EN_SHIFT));
108
109 ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(idx),
110 val | lower_32_bits(win_size));
111
112 ccsr_writel(pcie, PAB_EXT_PEX_AMAP_SIZE(idx), upper_32_bits(win_size));
113 ccsr_writel(pcie, PAB_PEX_AMAP_AXI_WIN(idx), lower_32_bits(phys));
114 ccsr_writel(pcie, PAB_EXT_PEX_AMAP_AXI_WIN(idx), upper_32_bits(phys));
115 ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
116 ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
117
118 return 0;
119}
120
121static void ls_pcie_g4_dump_wins(struct ls_pcie_g4 *pcie, int wins)
122{
123 int i;
124
125 for (i = 0; i < wins; i++) {
126 debug("APIO Win%d:\n", i);
127 debug("\tLOWER PHYS: 0x%08x\n",
128 ccsr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(i)));
129 debug("\tUPPER PHYS: 0x%08x\n",
130 ccsr_readl(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(i)));
131 debug("\tLOWER BUS: 0x%08x\n",
132 ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_L(i)));
133 debug("\tUPPER BUS: 0x%08x\n",
134 ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(i)));
135 debug("\tSIZE: 0x%08x\n",
136 ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)) &
137 (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT));
138 debug("\tEXT_SIZE: 0x%08x\n",
139 ccsr_readl(pcie, PAB_EXT_AXI_AMAP_SIZE(i)));
140 debug("\tPARAM: 0x%08x\n",
141 ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(i)));
142 debug("\tCTRL: 0x%08x\n",
143 ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)));
144 }
145}
146
147static void ls_pcie_g4_setup_wins(struct ls_pcie_g4 *pcie)
148{
149 struct pci_region *io, *mem, *pref;
150 int idx = 1;
151
152 /* INBOUND WIN */
153 ls_pcie_g4_rc_inbound_win_set(pcie, 0, IB_TYPE_MEM_F, 0, 0, SIZE_1T);
154
155 /* OUTBOUND WIN 0: CFG */
156 ls_pcie_g4_outbound_win_set(pcie, 0, PAB_AXI_TYPE_CFG,
157 pcie->cfg_res.start, 0,
158 fdt_resource_size(&pcie->cfg_res));
159
160 pci_get_regions(pcie->bus, &io, &mem, &pref);
161
162 if (io)
163 /* OUTBOUND WIN: IO */
164 ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_IO,
165 io->phys_start, io->bus_start,
166 io->size);
167
168 if (mem)
169 /* OUTBOUND WIN: MEM */
170 ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
171 mem->phys_start, mem->bus_start,
172 mem->size);
173
174 if (pref)
175 /* OUTBOUND WIN: perf MEM */
176 ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
177 pref->phys_start, pref->bus_start,
178 pref->size);
179
180 ls_pcie_g4_dump_wins(pcie, idx);
181}
182
183/* Return 0 if the address is valid, -errno if not valid */
184static int ls_pcie_g4_addr_valid(struct ls_pcie_g4 *pcie, pci_dev_t bdf)
185{
186 struct udevice *bus = pcie->bus;
187
188 if (pcie->mode == PCI_HEADER_TYPE_NORMAL)
189 return -ENODEV;
190
191 if (!pcie->enabled)
192 return -ENXIO;
193
194 if (PCI_BUS(bdf) < bus->seq)
195 return -EINVAL;
196
197 if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_g4_link_up(pcie)))
198 return -EINVAL;
199
200 if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
201 return -EINVAL;
202
203 return 0;
204}
205
206void *ls_pcie_g4_conf_address(struct ls_pcie_g4 *pcie, pci_dev_t bdf,
207 int offset)
208{
209 struct udevice *bus = pcie->bus;
210 u32 target;
211
212 if (PCI_BUS(bdf) == bus->seq) {
213 if (offset < INDIRECT_ADDR_BNDRY) {
214 ccsr_set_page(pcie, 0);
215 return pcie->ccsr + offset;
216 }
217
218 ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
219 return pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset);
220 }
221
222 target = PAB_TARGET_BUS(PCI_BUS(bdf) - bus->seq) |
223 PAB_TARGET_DEV(PCI_DEV(bdf)) |
224 PAB_TARGET_FUNC(PCI_FUNC(bdf));
225
226 ls_pcie_g4_cfg_set_target(pcie, target);
227
228 return pcie->cfg + offset;
229}
230
Simon Glassc4e72c42020-01-27 08:49:37 -0700231static int ls_pcie_g4_read_config(const struct udevice *bus, pci_dev_t bdf,
Hou Zhiqiang07ce19f2019-04-08 10:15:46 +0000232 uint offset, ulong *valuep,
233 enum pci_size_t size)
234{
235 struct ls_pcie_g4 *pcie = dev_get_priv(bus);
236 void *address;
237 int ret = 0;
238
239 if (ls_pcie_g4_addr_valid(pcie, bdf)) {
240 *valuep = pci_get_ff(size);
241 return 0;
242 }
243
244 address = ls_pcie_g4_conf_address(pcie, bdf, offset);
245
246 switch (size) {
247 case PCI_SIZE_8:
248 *valuep = readb(address);
249 break;
250 case PCI_SIZE_16:
251 *valuep = readw(address);
252 break;
253 case PCI_SIZE_32:
254 *valuep = readl(address);
255 break;
256 default:
257 ret = -EINVAL;
258 break;
259 }
260
261 return ret;
262}
263
264static int ls_pcie_g4_write_config(struct udevice *bus, pci_dev_t bdf,
265 uint offset, ulong value,
266 enum pci_size_t size)
267{
268 struct ls_pcie_g4 *pcie = dev_get_priv(bus);
269 void *address;
270
271 if (ls_pcie_g4_addr_valid(pcie, bdf))
272 return 0;
273
274 address = ls_pcie_g4_conf_address(pcie, bdf, offset);
275
276 switch (size) {
277 case PCI_SIZE_8:
278 writeb(value, address);
279 return 0;
280 case PCI_SIZE_16:
281 writew(value, address);
282 return 0;
283 case PCI_SIZE_32:
284 writel(value, address);
285 return 0;
286 default:
287 return -EINVAL;
288 }
289}
290
291static void ls_pcie_g4_setup_ctrl(struct ls_pcie_g4 *pcie)
292{
293 u32 val;
294
295 /* Fix class code */
296 val = ccsr_readl(pcie, GPEX_CLASSCODE);
297 val &= ~(GPEX_CLASSCODE_MASK << GPEX_CLASSCODE_SHIFT);
298 val |= PCI_CLASS_BRIDGE_PCI << GPEX_CLASSCODE_SHIFT;
299 ccsr_writel(pcie, GPEX_CLASSCODE, val);
300
301 /* Enable APIO and Memory/IO/CFG Wins */
302 val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
303 val |= APIO_EN | MEM_WIN_EN | IO_WIN_EN | CFG_WIN_EN;
304 ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
305
306 ls_pcie_g4_setup_wins(pcie);
307
308 pcie->stream_id_cur = 0;
309}
310
311static void ls_pcie_g4_ep_inbound_win_set(struct ls_pcie_g4 *pcie, int pf,
312 int bar, u64 phys)
313{
314 u32 val;
315
316 /* PF BAR1 is for MSI-X and only need to enable */
317 if (bar == 1) {
318 ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), BAR_AMAP_EN);
319 return;
320 }
321
322 val = upper_32_bits(phys);
323 ccsr_writel(pcie, PAB_EXT_PEX_BAR_AMAP(pf, bar), val);
324 val = lower_32_bits(phys) | BAR_AMAP_EN;
325 ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), val);
326}
327
328static void ls_pcie_g4_ep_setup_wins(struct ls_pcie_g4 *pcie, int pf)
329{
330 u64 phys;
331 int bar;
332 u32 val;
333
334 if ((!pcie->sriov_support && pf > LS_G4_PF0) || pf > LS_G4_PF1)
335 return;
336
337 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR_SIZE * 4 * pf;
338 for (bar = 0; bar < PF_BAR_NUM; bar++) {
339 ls_pcie_g4_ep_inbound_win_set(pcie, pf, bar, phys);
340 phys += PCIE_BAR_SIZE;
341 }
342
343 /* OUTBOUND: map MEM */
344 ls_pcie_g4_outbound_win_set(pcie, pf, PAB_AXI_TYPE_MEM,
345 pcie->cfg_res.start +
346 CONFIG_SYS_PCI_MEMORY_SIZE * pf, 0x0,
347 CONFIG_SYS_PCI_MEMORY_SIZE);
348
349 val = ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf));
350 val &= ~FUNC_NUM_PCIE_MASK;
351 val |= pf;
352 ccsr_writel(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf), val);
353}
354
355static void ls_pcie_g4_ep_enable_bar(struct ls_pcie_g4 *pcie, int pf,
356 int bar, bool vf_bar, bool enable)
357{
358 u32 val;
359 u32 bar_pos = BAR_POS(bar, pf, vf_bar);
360
361 val = ccsr_readl(pcie, GPEX_BAR_ENABLE);
362 if (enable)
363 val |= 1 << bar_pos;
364 else
365 val &= ~(1 << bar_pos);
366 ccsr_writel(pcie, GPEX_BAR_ENABLE, val);
367}
368
369static void ls_pcie_g4_ep_set_bar_size(struct ls_pcie_g4 *pcie, int pf,
370 int bar, bool vf_bar, u64 size)
371{
372 u32 bar_pos = BAR_POS(bar, pf, vf_bar);
373 u32 mask_l = lower_32_bits(~(size - 1));
374 u32 mask_h = upper_32_bits(~(size - 1));
375
376 ccsr_writel(pcie, GPEX_BAR_SELECT, bar_pos);
377 ccsr_writel(pcie, GPEX_BAR_SIZE_LDW, mask_l);
378 ccsr_writel(pcie, GPEX_BAR_SIZE_UDW, mask_h);
379}
380
381static void ls_pcie_g4_ep_setup_bar(struct ls_pcie_g4 *pcie, int pf,
382 int bar, bool vf_bar, u64 size)
383{
384 bool en = size ? true : false;
385
386 ls_pcie_g4_ep_enable_bar(pcie, pf, bar, vf_bar, en);
387 ls_pcie_g4_ep_set_bar_size(pcie, pf, bar, vf_bar, size);
388}
389
390static void ls_pcie_g4_ep_setup_bars(struct ls_pcie_g4 *pcie, int pf)
391{
392 int bar;
393
394 /* Setup PF BARs */
395 for (bar = 0; bar < PF_BAR_NUM; bar++)
396 ls_pcie_g4_ep_setup_bar(pcie, pf, bar, false, bar_size[bar]);
397
398 if (!pcie->sriov_support)
399 return;
400
401 /* Setup VF BARs */
402 for (bar = 0; bar < VF_BAR_NUM; bar++)
403 ls_pcie_g4_ep_setup_bar(pcie, pf, bar, true, bar_size[bar]);
404}
405
/*
 * Program the SR-IOV capability of physical function @pf: advertise
 * PCIE_VF_NUM total/initial VFs and set the per-PF VF offset/stride.
 */
static void ls_pcie_g4_set_sriov(struct ls_pcie_g4 *pcie, int pf)
{
	unsigned int val;

	/* TotalVFs / InitialVFs both set to PCIE_VF_NUM */
	val = ccsr_readl(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf));
	val &= ~(TTL_VF_MASK << TTL_VF_SHIFT);
	val |= PCIE_VF_NUM << TTL_VF_SHIFT;
	val &= ~(INI_VF_MASK << INI_VF_SHIFT);
	val |= PCIE_VF_NUM << INI_VF_SHIFT;
	ccsr_writel(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf), val);

	/*
	 * Shift the default VF offset by the VFs consumed by lower PFs
	 * (PCIE_VF_NUM per PF, minus the PF slots themselves).
	 * NOTE(review): reads the global PCIE_SRIOV_VF_OFFSET_STRIDE default
	 * but writes the per-PF GPEX register — presumably intentional
	 * (per-PF register derives from the capability default); confirm
	 * against the GPEX register map.
	 */
	val = ccsr_readl(pcie, PCIE_SRIOV_VF_OFFSET_STRIDE);
	val += PCIE_VF_NUM * pf - pf;
	ccsr_writel(pcie, GPEX_SRIOV_VF_OFFSET_STRIDE(pf), val);
}
421
422static void ls_pcie_g4_setup_ep(struct ls_pcie_g4 *pcie)
423{
424 u32 pf, sriov;
425 u32 val;
426 int i;
427
428 /* Enable APIO and Memory Win */
429 val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
430 val |= APIO_EN | MEM_WIN_EN;
431 ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
432
433 sriov = ccsr_readl(pcie, PCIE_SRIOV_CAPABILITY);
434 if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
435 pcie->sriov_support = 1;
436
437 pf = pcie->sriov_support ? PCIE_PF_NUM : 1;
438
439 for (i = 0; i < pf; i++) {
440 ls_pcie_g4_ep_setup_bars(pcie, i);
441 ls_pcie_g4_ep_setup_wins(pcie, i);
442 if (pcie->sriov_support)
443 ls_pcie_g4_set_sriov(pcie, i);
444 }
445
446 ls_pcie_g4_ep_enable_cfg(pcie);
447 ls_pcie_g4_dump_wins(pcie, pf);
448}
449
/*
 * Probe a Layerscape Gen4 PCIe controller: map the "ccsr", "config",
 * "lut" and "pf_ctrl" register regions from the device tree, detect
 * RC vs EP mode, run the mode-specific setup, enable PIO, and report
 * the negotiated link state.
 *
 * Return: 0 on success (including disabled controller / link-down),
 * or a negative libfdt error if a required "reg" resource is missing.
 */
static int ls_pcie_g4_probe(struct udevice *dev)
{
	struct ls_pcie_g4 *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u32 link_ctrl_sta;
	u32 val;
	int ret;

	pcie->bus = dev;

	/* "ccsr": controller configuration/status register space */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ccsr", &pcie->ccsr_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"ccsr\" not found\n");
		return ret;
	}

	/* Derive the controller index from its CCSR base address */
	pcie->idx = (pcie->ccsr_res.start - PCIE_SYS_BASE_ADDR) /
		    PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_g4_list);

	/* Skip controllers whose SerDes lanes are not muxed to PCIe */
	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->ccsr = map_physmem(pcie->ccsr_res.start,
				 fdt_resource_size(&pcie->ccsr_res),
				 MAP_NOCACHE);

	/* "config": outbound window used for config-space accesses */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	pcie->cfg = map_physmem(pcie->cfg_res.start,
				fdt_resource_size(&pcie->cfg_res),
				MAP_NOCACHE);

	/* "lut": look-up table register region */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"lut\" not found\n");
		return ret;
	}

	pcie->lut = map_physmem(pcie->lut_res.start,
				fdt_resource_size(&pcie->lut_res),
				MAP_NOCACHE);

	/* "pf_ctrl": physical-function control registers (LTSSM status) */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "pf_ctrl", &pcie->pf_ctrl_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"pf_ctrl\" not found\n");
		return ret;
	}

	pcie->pf_ctrl = map_physmem(pcie->pf_ctrl_res.start,
				    fdt_resource_size(&pcie->pf_ctrl_res),
				    MAP_NOCACHE);

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s ccsr:%lx, cfg:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->ccsr, (unsigned long)pcie->cfg,
	      pcie->big_endian);

	/* RC vs EP mode is latched in the header-type register */
	pcie->mode = readb(pcie->ccsr + PCI_HEADER_TYPE) & 0x7f;

	if (pcie->mode == PCI_HEADER_TYPE_NORMAL) {
		printf("PCIe%u: %s %s", pcie->idx, dev->name, "Endpoint");
		ls_pcie_g4_setup_ep(pcie);
	} else {
		printf("PCIe%u: %s %s", pcie->idx, dev->name, "Root Complex");
		ls_pcie_g4_setup_ctrl(pcie);
	}

	/* Enable Amba & PEX PIO */
	val = ccsr_readl(pcie, PAB_CTRL);
	val |= PAB_CTRL_APIO_EN | PAB_CTRL_PPIO_EN;
	ccsr_writel(pcie, PAB_CTRL, val);

	val = ccsr_readl(pcie, PAB_PEX_PIO_CTRL(0));
	val |= PPIO_EN;
	ccsr_writel(pcie, PAB_PEX_PIO_CTRL(0), val);

	if (!ls_pcie_g4_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_ctrl_sta = ccsr_readl(pcie, PCIE_LINK_CTRL_STA);
	printf(": x%d gen%d\n",
	       (link_ctrl_sta >> PCIE_LINK_WIDTH_SHIFT & PCIE_LINK_WIDTH_MASK),
	       (link_ctrl_sta >> PCIE_LINK_SPEED_SHIFT) & PCIE_LINK_SPEED_MASK);

	return 0;
}
555
/* DM PCI controller operations: config-space read/write accessors */
static const struct dm_pci_ops ls_pcie_g4_ops = {
	.read_config = ls_pcie_g4_read_config,
	.write_config = ls_pcie_g4_write_config,
};

/* Device-tree compatibles bound to this driver */
static const struct udevice_id ls_pcie_g4_ids[] = {
	{ .compatible = "fsl,lx2160a-pcie" },
	{ }
};

U_BOOT_DRIVER(pcie_layerscape_gen4) = {
	.name = "pcie_layerscape_gen4",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_g4_ids,
	.ops = &ls_pcie_g4_ops,
	.probe = ls_pcie_g4_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie_g4),
};