/*
 * Freescale i.MX6 PCI Express Root-Complex driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * Based on upstream Linux kernel driver:
 * pci-imx6.c: Sean Cross <xobs@kosagi.com>
 * pcie-designware.c: Jingoo Han <jg1.han@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/clock.h>
#include <asm/arch/iomux.h>
#include <asm/arch/crm_regs.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/sizes.h>
#include <errno.h>

#define PCI_ACCESS_READ		0
#define PCI_ACCESS_WRITE	1

#define MX6_DBI_ADDR	0x01ffc000
#define MX6_DBI_SIZE	0x4000
#define MX6_IO_ADDR	0x01000000
#define MX6_IO_SIZE	0x100000
#define MX6_MEM_ADDR	0x01100000
#define MX6_MEM_SIZE	0xe00000
#define MX6_ROOT_ADDR	0x01f00000
#define MX6_ROOT_SIZE	0xfc000

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP		(1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(1 << 29)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_DATA_LOC 0
#define PCIE_PHY_STAT_ACK_LOC 16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

/* iATU registers */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

/*
 * PHY access functions
 */
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

/*
 * Check whether the PCIe link is up. Note the unusual convention: the only
 * non-zero return value (-EAGAIN) indicates that the link is up and training
 * has completed; the caller relies on this in its
 * "while (!imx6_pcie_link_up())" polling loop.
 */
static int imx6_pcie_link_up(void)
{
	u32 rc, ltssm;
	int rx_valid, temp;

	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock) && (PHY/rx_valid == 0),
	 * the transition to gen2 is stuck; pulse PHY/rx_reset.
	 */
	pcie_phy_read((void *)MX6_DBI_ADDR, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0) & 0x3F;

	if (rx_valid & 0x01)
		return 0;

	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}

/*
 * iATU region setup
 */
static int imx_pcie_regions_setup(void)
{
	/*
	 * i.MX6 defines 16MB in the AXI address map for PCIe.
	 *
	 * That address space, with the exception of the PCIe registers, is
	 * split into different regions by the iATU, with sizes and offsets
	 * as follows:
	 *
	 * 0x0100_0000 --- 0x010F_FFFF  1MB IORESOURCE_IO
	 * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM
	 * 0x01F0_0000 --- 0x01FF_FFFF  1MB Cfg + Registers
	 */

	/* CMD reg: I/O space, MEM space, and Bus Master Enable */
	setbits_le32(MX6_DBI_ADDR | PCI_COMMAND,
		     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI */
	setbits_le32(MX6_DBI_ADDR + PCI_CLASS_REVISION,
		     PCI_CLASS_BRIDGE_PCI << 16);

	/* Region #0 is used for Outbound CFG space access. */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	writel(MX6_ROOT_ADDR, MX6_DBI_ADDR + PCIE_ATU_LOWER_BASE);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_BASE);
	writel(MX6_ROOT_ADDR + MX6_ROOT_SIZE, MX6_DBI_ADDR + PCIE_ATU_LIMIT);

	writel(0, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, MX6_DBI_ADDR + PCIE_ATU_CR2);

	return 0;
}

/*
 * PCI Express accessors
 */
static uint32_t get_bus_address(pci_dev_t d, int where)
{
	uint32_t va_address;

	/* Reconfigure Region #0 */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	if (PCI_BUS(d) < 2)
		writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	else
		writel(PCIE_ATU_TYPE_CFG1, MX6_DBI_ADDR + PCIE_ATU_CR1);

	if (PCI_BUS(d) == 0) {
		va_address = MX6_DBI_ADDR;
	} else {
		writel(d << 8, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
		va_address = MX6_IO_ADDR + SZ_16M - SZ_1M;
	}

	va_address += (where & ~0x3);

	return va_address;
}

static int imx_pcie_addr_valid(pci_dev_t d)
{
	if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1))
		return -EINVAL;
	if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0))
		return -EINVAL;
	return 0;
}

/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridge's downstream
 * port, an attempt to read from/write to the config space will produce a
 * data abort (DABT). Unfortunately, this is a behavior of the controller
 * and cannot be disabled.
 *
 * To work around the problem, we back up the current DABT handler address
 * and replace it with our own DABT handler, which merely bounces right
 * back into the code.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

	/* The bounce handler is a single "subs pc, lr, #4" instruction. */
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
}

static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
				int where, u32 *val)
{
	uint32_t va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		*val = 0xffffffff;
		return ret;
	}

	va_address = get_bus_address(d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description. Note that pre-setting
	 * "val" to a valid value is also imperative here: if we do get a
	 * DABT, "val" would otherwise contain a random value.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
				 int where, u32 val)
{
	uint32_t va_address = 0;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

/*
 * Initial bus setup
 */
static int imx6_pcie_assert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);

	return 0;
}

static int imx6_pcie_init_phy(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

	writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
	       (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
	       (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET),
	       &iomuxc_regs->gpr[8]);

	return 0;
}

__weak int imx6_pcie_toggle_power(void)
{
#ifdef CONFIG_PCIE_IMX_POWER_GPIO
	gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1);
	mdelay(20);
#endif
	return 0;
}

__weak int imx6_pcie_toggle_reset(void)
{
	/*
	 * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1'
	 * for a detailed understanding of the PCIe CR reset logic.
	 *
	 * The PCIe #PERST reset line _MUST_ be connected, otherwise your
	 * design does not conform to the specification. You must wait at
	 * least 20 ms after de-asserting #PERST so the EP device can do
	 * its self-initialisation.
	 *
	 * In case your #PERST pin is connected to a plain GPIO pin of the
	 * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's
	 * configuration file and the conditional below will handle the
	 * rest of the reset toggling.
	 *
	 * In case your #PERST toggling logic is more complex, for example
	 * routed through a CPLD or similar, you can override this function
	 * in your board file and implement the reset logic as needed (see
	 * the example sketch after this function). Even then, you must not
	 * forget to wait at least 20 ms after de-asserting #PERST.
	 *
	 * In case the #PERST line of the PCIe EP device is not connected
	 * at all, your design is broken and you should fix it. Otherwise
	 * you will observe problems such as the link not coming up after
	 * rebooting the system from a Linux installation that also uses
	 * the PCIe, or the PCIe link not coming up in Linux at all in the
	 * first place, because the device is left in a non-reset state
	 * after having been used by U-Boot.
	 */
#ifdef CONFIG_PCIE_IMX_PERST_GPIO
	gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1);
	mdelay(20);
#else
	puts("WARNING: Make sure the PCIe #PERST line is connected!\n");
#endif
	return 0;
}
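
/*
 * Example sketch (hypothetical, disabled): a board whose #PERST signal is
 * routed through a CPLD-controlled GPIO would provide a non-weak
 * imx6_pcie_toggle_reset() like the one below in its board file. The GPIO
 * number macro is made up for illustration only; the one hard requirement
 * is the >= 20 ms delay after de-asserting #PERST.
 */
#if 0	/* for illustration only -- this belongs in a board file */
#define BOARD_PCIE_PERST_GPIO	IMX_GPIO_NR(7, 12)	/* hypothetical pin */

int imx6_pcie_toggle_reset(void)
{
	/* Assert #PERST (active low) and hold it briefly. */
	gpio_direction_output(BOARD_PCIE_PERST_GPIO, 0);
	mdelay(20);
	/* De-assert #PERST and give the EP at least 20 ms to initialise. */
	gpio_set_value(BOARD_PCIE_PERST_GPIO, 1);
	mdelay(20);
	return 0;
}
#endif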

static int imx6_pcie_deassert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	imx6_pcie_toggle_power();

	/* Enable PCIe */
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);

	enable_pcie_clock();

	/*
	 * Wait for the clocks to settle. When the clocks are sourced from
	 * the CPU, they need about 30 ms to settle.
	 */
	mdelay(50);

	imx6_pcie_toggle_reset();

	return 0;
}

static int imx_pcie_link_up(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	uint32_t tmp;
	int count = 0;

	imx6_pcie_assert_core_reset();
	imx6_pcie_init_phy();
	imx6_pcie_deassert_core_reset();

	imx_pcie_regions_setup();

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 */
	tmp = readl(MX6_DBI_ADDR + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, MX6_DBI_ADDR + 0x7c);

	/* LTSSM enable, starting link. */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	while (!imx6_pcie_link_up()) {
		udelay(10);
		count++;
		if (count >= 2000) {
			debug("phy link never came up\n");
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0),
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1));
			return -EINVAL;
		}
	}

	return 0;
}

void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller pcc;
	struct pci_controller *hose = &pcc;
	int ret;

	memset(&pcc, 0, sizeof(pcc));

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       MX6_IO_ADDR, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	hose->region_count = 3;

	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up();

	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
	}
}

/* Probe function. */
void pci_init_board(void)
{
	imx_pcie_init();
}