// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <watchdog.h>

#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-lmt.h>
#include <asm/io.h>
#include <asm/types.h>

#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "nix.h"
#include "lmt.h"
#include "cgx.h"

/**
 * nix_memalloc() - Allocate a zeroed, cache-line-aligned memory area
 *
 * NIX needs a lot of memory areas. Rather than handle all the failure cases,
 * we use a wrapper around memalign() that prints an error if a memory
 * allocation fails.
 *
 * @num_elements: Number of elements to allocate
 * @elem_size: Size of each element
 * @msg: Text string to show when allocation fails
 *
 * Return: A valid memory location or NULL on failure
 */
static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg)
{
	size_t alloc_size = num_elements * elem_size;
	void *base = memalign(CONFIG_SYS_CACHELINE_SIZE, alloc_size);

	if (!base)
		printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n",
		       msg ? msg : __func__, num_elements, elem_size,
		       alloc_size);
	else
		memset(base, 0, alloc_size);

	debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n",
	      msg ? msg : __func__, num_elements, elem_size, alloc_size, base);
	return base;
}

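/**
 * npc_lf_setup() - Set up the NPC LF for this NIX interface
 *
 * Currently a thin wrapper that delegates to the admin setup function.
 *
 * @nix: NIX instance to configure
 * Return: 0 on success, negative error code on failure
 */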
int npc_lf_setup(struct nix *nix)
{
	int err;

	err = npc_lf_admin_setup(nix);
	if (err) {
		printf("%s: Error setting up npc lf admin\n", __func__);
		return err;
	}

	return 0;
}

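/**
 * npa_setup_pool() - Fill an NPA pool with freshly allocated buffers
 *
 * Allocates @queue_length cache-line-aligned buffers of @buffer_size bytes,
 * records them in @buffers[] and releases each one to the aura identified
 * by @pool_id so the hardware can hand them out later.
 *
 * @npa: NPA instance
 * @pool_id: Aura/pool to fill
 * @buffer_size: Size of each buffer in bytes
 * @queue_length: Number of buffers to add
 * @buffers: Array that receives the allocated buffer pointers
 * Return: 0 on success, -ENOMEM if a buffer allocation fails
 */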
static int npa_setup_pool(struct npa *npa, u32 pool_id,
			  size_t buffer_size, u32 queue_length, void *buffers[])
{
	struct {
		union npa_lf_aura_op_free0 f0;
		union npa_lf_aura_op_free1 f1;
	} aura_descr;
	int index;

	for (index = 0; index < queue_length; index++) {
		buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE,
					  buffer_size);
		if (!buffers[index]) {
			printf("%s: Out of memory %d, size: %zu\n",
			       __func__, index, buffer_size);
			return -ENOMEM;
		}
		debug("%s: allocating buffer %d, addr %p size: %zu\n",
		      __func__, index, buffers[index], buffer_size);

		/* Add the newly obtained pointer to the pool. 128-bit
		 * writes only.
		 */
		aura_descr.f0.s.addr = (u64)buffers[index];
		aura_descr.f1.u = 0;
		aura_descr.f1.s.aura = pool_id;
		st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(),
		      aura_descr.f0.u, aura_descr.f1.u);
	}

	return 0;
}

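/**
 * npa_lf_setup() - Allocate and configure the NPA LF for a NIX interface
 *
 * Sets up aura and pool contexts for the RX, TX and SQB pools, sizes the
 * pool stacks from NPA_AF_CONST, attaches auras and pools through the AF,
 * then fills each pool with buffers via npa_setup_pool().
 *
 * @nix: NIX instance owning this NPA LF
 * Return: 0 on success, negative error code on failure
 */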
int npa_lf_setup(struct nix *nix)
{
	struct rvu_pf *rvu = dev_get_priv(nix->dev);
	struct nix_af *nix_af = nix->nix_af;
	struct npa *npa;
	union npa_af_const npa_af_const;
	union npa_aura_s *aura;
	union npa_pool_s *pool;
	union rvu_func_addr_s block_addr;
	int idx;
	int stack_page_pointers;
	int stack_page_bytes;
	int err;

	npa = (struct npa *)calloc(1, sizeof(struct npa));
	if (!npa) {
		printf("%s: out of memory for npa instance\n", __func__);
		return -ENOMEM;
	}
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
	npa->npa_base = rvu->pf_base + block_addr.u;
	npa->npa_af = nix_af->npa_af;
	nix->npa = npa;

	npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST());
	stack_page_pointers = npa_af_const.s.stack_page_ptrs;
	stack_page_bytes = npa_af_const.s.stack_page_bytes;

	npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) /
						stack_page_pointers;
	npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) /
						stack_page_pointers;
	npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) /
						stack_page_pointers;
	npa->pool_stack_pointers = stack_page_pointers;

	npa->q_len[NPA_POOL_RX] = RQ_QLEN;
	npa->q_len[NPA_POOL_TX] = SQ_QLEN;
	npa->q_len[NPA_POOL_SQB] = SQB_QLEN;

	npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
	npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
	npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size;

	npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT,
				     sizeof(union npa_aura_s),
				     "aura context");
	if (!npa->aura_ctx) {
		printf("%s: Out of memory for aura context\n", __func__);
		return -ENOMEM;
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		npa->pool_ctx[idx] = nix_memalloc(1,
						  sizeof(union npa_pool_s),
						  "pool context");
		if (!npa->pool_ctx[idx]) {
			printf("%s: Out of memory for pool context\n",
			       __func__);
			return -ENOMEM;
		}
		npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx],
						    stack_page_bytes,
						    "pool stack");
		if (!npa->pool_stack[idx]) {
			printf("%s: Out of memory for pool stack\n", __func__);
			return -ENOMEM;
		}
	}

	err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx);
	if (err) {
		printf("%s: Error setting up NPA LF admin for lf %d\n",
		       __func__, nix->lf);
		return err;
	}

	/* Set up the auras */
	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s));
		pool = npa->pool_ctx[idx];
		debug("%s aura %p pool %p\n", __func__, aura, pool);
		memset(aura, 0, sizeof(union npa_aura_s));
		aura->s.fc_ena = 0;
		aura->s.pool_addr = (u64)npa->pool_ctx[idx];
		debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__,
		      aura->s.pool_addr, npa->pool_ctx[idx]);
		aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8;
		aura->s.count = npa->q_len[idx];
		aura->s.limit = npa->q_len[idx];
		aura->s.ena = 1;
		err = npa_attach_aura(nix_af, nix->lf, aura, idx);
		if (err)
			return err;

		memset(pool, 0, sizeof(*pool));
		pool->s.fc_ena = 0;
		pool->s.nat_align = 1;
		pool->s.stack_base = (u64)(npa->pool_stack[idx]);
		debug("%s pool.s.stack_base %llx stack_base %p\n", __func__,
		      pool->s.stack_base, npa->pool_stack[idx]);
		pool->s.buf_size =
			npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE;
		pool->s.stack_max_pages = npa->stack_pages[idx];
		pool->s.shift =
			64 - __builtin_clzll(npa->pool_stack_pointers) - 8;
		pool->s.ptr_start = 0;
		pool->s.ptr_end = (1ULL << 40) - 1;
		pool->s.ena = 1;
		err = npa_attach_pool(nix_af, nix->lf, pool, idx);
		if (err)
			return err;
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		npa->buffers[idx] = nix_memalloc(npa->q_len[idx],
						 sizeof(void *),
						 "buffers");
		if (!npa->buffers[idx]) {
			printf("%s: Out of memory\n", __func__);
			return -ENOMEM;
		}
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		err = npa_setup_pool(npa, idx, npa->buf_size[idx],
				     npa->q_len[idx], npa->buffers[idx]);
		if (err) {
			printf("%s: Error setting up pool %d\n",
			       __func__, idx);
			return err;
		}
	}
	return 0;
}

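/**
 * npa_lf_shutdown() - Tear down the NPA LF and free all associated memory
 *
 * @nix: NIX instance whose NPA LF is shut down
 * Return: 0 on success, negative error code on failure
 */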
int npa_lf_shutdown(struct nix *nix)
{
	struct npa *npa = nix->npa;
	int err;
	int pool;

	err = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT);
	if (err) {
		printf("%s: Error %d shutting down NPA LF admin\n",
		       __func__, err);
		return err;
	}
	free(npa->aura_ctx);
	npa->aura_ctx = NULL;

	for (pool = 0; pool < NPA_POOL_COUNT; pool++) {
		free(npa->pool_ctx[pool]);
		npa->pool_ctx[pool] = NULL;
		free(npa->pool_stack[pool]);
		npa->pool_stack[pool] = NULL;
		free(npa->buffers[pool]);
		npa->buffers[pool] = NULL;
	}

	return 0;
}

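/**
 * nix_lf_setup() - Allocate NIX LF context memory and configure the LF
 *
 * Allocates hardware context areas for the RQ, SQ, CQ, Qint, Cint and RSS
 * structures plus the CQ rings, then runs the LF admin setup.
 *
 * @nix: NIX instance to set up
 * Return: 0 on success, negative error code on failure
 */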
int nix_lf_setup(struct nix *nix)
{
	struct nix_af *nix_af = nix->nix_af;
	int idx;
	int err = -1;

	/* Alloc NIX RQ HW context memory */
	nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz,
					"RQ CTX");
	if (!nix->rq_ctx_base)
		goto error;
	memset(nix->rq_ctx_base, 0, nix_af->rq_ctx_sz * nix->rq_cnt);

	/* Alloc NIX SQ HW context memory */
	nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz,
					"SQ CTX");
	if (!nix->sq_ctx_base)
		goto error;
	memset(nix->sq_ctx_base, 0, nix_af->sq_ctx_sz * nix->sq_cnt);

	/* Alloc NIX CQ HW context memory */
	nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz,
					"CQ CTX");
	if (!nix->cq_ctx_base)
		goto error;
	memset(nix->cq_ctx_base, 0, nix_af->cq_ctx_sz * nix->cq_cnt);
	/* Alloc NIX CQ Ring memory */
	for (idx = 0; idx < NIX_CQ_COUNT; idx++) {
		err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE);
		if (err)
			goto error;
	}

	/* Alloc memory for Qints HW contexts */
	nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz,
				      "Qint CTX");
	if (!nix->qint_base)
		goto error;
	/* Alloc memory for CQints HW contexts */
	nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz,
				      "Cint CTX");
	if (!nix->cint_base)
		goto error;
	/* Alloc NIX RSS HW context memory and config the base */
	nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz,
				     "RSS CTX");
	if (!nix->rss_base)
		goto error;

	err = nix_lf_admin_setup(nix);
	if (err) {
		printf("%s: Error setting up LF\n", __func__);
		goto error;
	}

	return 0;

error:
	free(nix->rq_ctx_base);
	nix->rq_ctx_base = NULL;
	free(nix->sq_ctx_base);
	nix->sq_ctx_base = NULL;
	free(nix->cq_ctx_base);
	nix->cq_ctx_base = NULL;

	for (idx = 0; idx < NIX_CQ_COUNT; idx++)
		qmem_free(&nix->cq[idx]);

	return err;
}

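/**
 * nix_lf_shutdown() - Shut down the NIX LF and free its context memory
 *
 * @nix: NIX instance to shut down
 * Return: 0 on success, negative error code on failure
 */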
int nix_lf_shutdown(struct nix *nix)
{
	struct nix_af *nix_af = nix->nix_af;
	int index;
	int err;

	err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt,
				    nix->rq_cnt, nix->sq_cnt);
	if (err) {
		printf("%s: Error shutting down LF admin\n", __func__);
		return err;
	}

	free(nix->rq_ctx_base);
	nix->rq_ctx_base = NULL;
	free(nix->sq_ctx_base);
	nix->sq_ctx_base = NULL;
	free(nix->cq_ctx_base);
	nix->cq_ctx_base = NULL;

	for (index = 0; index < NIX_CQ_COUNT; index++)
		qmem_free(&nix->cq[index]);

	debug("%s: nix lf %d reset --\n", __func__, nix->lf);
	return 0;
}

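/**
 * nix_lf_alloc() - Allocate and initialize a NIX LF instance for a device
 *
 * Maps the NIX, NPC and LMT block addresses, binds the LF to its CGX LMAC
 * and channel, and brings up the NPA, NPC and NIX LFs.
 *
 * @dev: RVU PF device
 * Return: Pointer to the new NIX instance, or NULL on failure
 */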
struct nix *nix_lf_alloc(struct udevice *dev)
{
	union rvu_func_addr_s block_addr;
	struct nix *nix;
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct rvu_af *rvu_af = dev_get_priv(rvu->afdev);
	union rvu_pf_func_s pf_func;
	int err;

	debug("%s(%s)\n", __func__, dev->name);

	nix = (struct nix *)calloc(1, sizeof(*nix));
	if (!nix) {
		printf("%s: Out of memory for nix instance\n", __func__);
		return NULL;
	}
	nix->nix_af = rvu_af->nix_af;

	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
	nix->nix_base = rvu->pf_base + block_addr.u;
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
	nix->npc_base = rvu->pf_base + block_addr.u;
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_LMT;
	nix->lmt_base = rvu->pf_base + block_addr.u;

	pf_func.u = 0;
	pf_func.s.pf = rvu->pfid;
	nix->pf_func = pf_func.u;
	nix->lf = rvu->nix_lfid;
	nix->pf = rvu->pfid;
	nix->dev = dev;
	nix->sq_cnt = 1;
	nix->rq_cnt = 1;
	nix->rss_grps = 1;
	nix->cq_cnt = 2;
	nix->xqe_sz = NIX_CQE_SIZE_W16;

	nix->lmac = nix_get_cgx_lmac(nix->pf);
	if (!nix->lmac) {
		printf("%s: Error: could not find lmac for pf %d\n",
		       __func__, nix->pf);
		free(nix);
		return NULL;
	}
	nix->lmac->link_num =
		NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id,
				      nix->lmac->lmac_id);
	nix->lmac->chan_num =
		NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id,
					  nix->lmac->lmac_id, 0);
	/* The RX pkind is a 1:1 mapping to NIX_LINK_E */
	nix->lmac->pknd = nix->lmac->link_num;

	cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd);
	debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name,
	      nix->lmac->cgx->cgx_id, nix->lmac->lmac_id);
	debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name,
	      nix->lmac->link_num, nix->lmac->chan_num, nix->lmac->pknd);

	err = npa_lf_setup(nix);
	if (err) {
		free(nix);
		return NULL;
	}

	err = npc_lf_setup(nix);
	if (err) {
		free(nix);
		return NULL;
	}

	err = nix_lf_setup(nix);
	if (err) {
		free(nix);
		return NULL;
	}

	return nix;
}

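/**
 * npa_aura_op_alloc() - Pop one buffer pointer from an NPA aura
 *
 * Issues an atomic fetch-and-add to the aura allocate register; the value
 * returned by the hardware carries the address of the allocated buffer.
 * Callers treat a zero address as allocation failure.
 *
 * @npa: NPA instance
 * @aura_id: Aura to allocate from
 * Return: Buffer address, or 0 if no buffer was available
 */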
u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id)
{
	union npa_lf_aura_op_allocx op_allocx;

	op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base +
						    NPA_LF_AURA_OP_ALLOCX(0),
						    aura_id);
	return op_allocx.s.addr;
}

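/**
 * nix_cq_op_status() - Read the head/tail status of a completion queue
 *
 * Uses an atomic fetch-and-add with the CQ number shifted into the upper
 * word so the hardware returns the status word for the selected CQ.
 *
 * @nix: NIX instance
 * @cq_id: Completion queue index
 * Return: Raw NIXX_LF_CQ_OP_STATUS register value
 */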
u64 nix_cq_op_status(struct nix *nix, u64 cq_id)
{
	union nixx_lf_cq_op_status op_status;
	s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS();

	op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32);
	return op_status.u;
}

/* TX */
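/**
 * nix_write_lmt() - Copy a descriptor into the LMT store region
 *
 * The descriptor is staged in the LMT area one 64-bit word at a time;
 * a subsequent lmt_submit() pushes it to the hardware.
 *
 * @nix: NIX instance
 * @buffer: Descriptor words to copy
 * @num_words: Number of 64-bit words
 */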
static inline void nix_write_lmt(struct nix *nix, void *buffer,
				 int num_words)
{
	int i;

	u64 *lmt_ptr = lmt_store_ptr(nix);
	u64 *ptr = buffer;

	debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, lmt_ptr);
	for (i = 0; i < num_words; i++) {
		debug("%s data %llx lmt_ptr %p\n", __func__, ptr[i],
		      lmt_ptr + i);
		lmt_ptr[i] = ptr[i];
	}
}

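/**
 * nix_cqe_tx_pkt_handler() - Acknowledge one TX completion entry
 *
 * Validates the CQE type and rings the CQ doorbell to release the entry.
 *
 * @nix: NIX instance
 * @cqe: Completion queue entry to process
 */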
void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe)
{
	union nix_cqe_hdr_s *txcqe = (union nix_cqe_hdr_s *)cqe;

	debug("%s: txcqe: %p\n", __func__, txcqe);

	if (txcqe->s.cqe_type != NIX_XQE_TYPE_E_SEND) {
		printf("%s: Error: Unsupported CQ header type %d\n",
		       __func__, txcqe->s.cqe_type);
		return;
	}
	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
			 (NIX_CQ_TX << 32) | 1);
}

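/**
 * nix_lf_flush_tx() - Drain and acknowledge all pending TX completions
 *
 * Walks the TX CQ from head to tail, handing each entry to
 * nix_cqe_tx_pkt_handler() until the queue is empty.
 *
 * @dev: RVU PF device
 */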
void nix_lf_flush_tx(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	u32 head, tail;
	void *cq_tx_base = nix->cq[NIX_CQ_TX].base;
	union nix_cqe_hdr_s *cqe;

	/* ack tx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_TX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_TX].qsize - 1);

	debug("%s cq tx head %d tail %d\n", __func__, head, tail);
	while (head != tail) {
		cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz;
		nix_cqe_tx_pkt_handler(nix, cqe);
		op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
		head = op_status.s.head;
		tail = op_status.s.tail;
		head &= (nix->cq[NIX_CQ_TX].qsize - 1);
		tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
		debug("%s cq tx head %d tail %d\n", __func__, head, tail);
	}
}

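/**
 * nix_lf_xmit() - Transmit one packet through the NIX LF
 *
 * Copies the packet into a TX pool buffer, builds a send descriptor
 * (header + scatter/gather), writes it to the LMT region and submits it,
 * retrying until the LMT store is accepted.
 *
 * @dev: RVU PF device
 * @pkt: Packet data to send
 * @pkt_len: Packet length in bytes
 * Return: 0 on success, -1 if no TX buffer was available
 */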
int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct nix_tx_dr tx_dr;
	int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1;
	s64 result;
	void *packet;

	nix_lf_flush_tx(dev);
	memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr));
	/* Dump TX packet into an NPA buffer */
	packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
	if (!packet) {
		printf("%s TX buffers unavailable\n", __func__);
		return -1;
	}
	memcpy(packet, pkt, pkt_len);
	debug("%s TX buffer %p\n", __func__, packet);

	tx_dr.hdr.s.aura = NPA_POOL_TX;
	tx_dr.hdr.s.df = 0;
	tx_dr.hdr.s.pnc = 1;
	tx_dr.hdr.s.sq = 0;
	tx_dr.hdr.s.total = pkt_len;
	tx_dr.hdr.s.sizem1 = dr_sz - 2; /* FIXME - for now hdr+sg+sg1addr */
	debug("%s dr_sz %d\n", __func__, dr_sz);

	tx_dr.tx_sg.s.segs = 1;
	tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG;
	tx_dr.tx_sg.s.seg1_size = pkt_len;
	tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT;
	tx_dr.sg1_addr = (dma_addr_t)packet;

#define DEBUG_PKT
#ifdef DEBUG_PKT
	debug("TX PKT Data\n");
	for (int i = 0; i < pkt_len; i++) {
		if (i && (i % 8 == 0))
			debug("\n");
		debug("%02x ", *((u8 *)pkt + i));
	}
	debug("\n");
#endif
	do {
		nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2);
		__iowmb();
		result = lmt_submit((u64)(nix->nix_base +
					  NIXX_LF_OP_SENDX(0)));
		schedule();
	} while (result == 0);

	return 0;
}

/* RX */
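/**
 * nix_lf_flush_rx() - Drop all pending RX completions
 *
 * Walks the RX CQ, returns each packet buffer to the RX pool and rings
 * the doorbell so the hardware reclaims the CQ entries.
 *
 * @dev: RVU PF device
 */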
void nix_lf_flush_rx(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
	struct nix_rx_dr *rx_dr;
	union nix_rx_parse_s *rxparse;
	u32 head, tail;
	u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz;
	u64 *seg;

	/* flush rx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);

	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	while (head != tail) {
		rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz);
		rxparse = &rx_dr->rx_parse;

		debug("%s: rx parse: %p\n", __func__, rxparse);
		debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
		      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);

		seg = (dma_addr_t *)(&rx_dr->rx_sg + 1);

		st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(),
		      seg[0], (1ULL << 63) | NPA_POOL_RX);

		debug("%s return %llx to NPA\n", __func__, seg[0]);
		nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
				 (NIX_CQ_RX << 32) | 1);

		op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
		head = op_status.s.head;
		tail = op_status.s.tail;
		head &= (nix->cq[NIX_CQ_RX].qsize - 1);
		tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
		debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	}
}

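/**
 * nix_lf_free_pkt() - Return a received packet buffer to the RX pool
 *
 * @dev: RVU PF device
 * @pkt: Buffer previously handed out by nix_lf_recv()
 * @pkt_len: Length of the packet (unused)
 * Return: 0
 */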
int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;

	/* Return rx packet to NPA */
	debug("%s return %p to NPA\n", __func__, pkt);
	st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt,
	      (1ULL << 63) | NPA_POOL_RX);
	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
			 (NIX_CQ_RX << 32) | 1);

	nix_lf_flush_tx(dev);
	return 0;
}

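/**
 * nix_lf_recv() - Receive one packet from the NIX LF
 *
 * Checks the RX CQ for a pending completion, validates the entry and
 * returns the packet buffer to the caller. The buffer must be given back
 * via nix_lf_free_pkt() once consumed.
 *
 * @dev: RVU PF device
 * @flags: Receive flags (unused)
 * @packetp: Set to the received packet data
 * Return: Packet length, -EAGAIN if no packet is pending, -1 on error
 */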
int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
	struct nix_rx_dr *rx_dr;
	union nix_rx_parse_s *rxparse;
	void *pkt, *cqe;
	int pkt_len = 0;
	u64 *addr;
	u32 head, tail;

	/* fetch rx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	if (head == tail)
		return -EAGAIN;

	debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head,
	      nix->cq[NIX_CQ_RX].entry_sz);
	cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz;
	rx_dr = (struct nix_rx_dr *)cqe;
	rxparse = &rx_dr->rx_parse;

	debug("%s: rx completion: %p\n", __func__, cqe);
	debug("%s: rx dr: %p\n", __func__, rx_dr);
	debug("%s: rx parse: %p\n", __func__, rxparse);
	debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
	      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
	debug("%s: rx parse: pkind %x chan %x\n",
	      __func__, rxparse->s.pkind, rxparse->s.chan);

	if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) {
		printf("%s: Error: Unsupported CQ header type in Rx %d\n",
		       __func__, rx_dr->hdr.s.cqe_type);
		return -1;
	}

	pkt_len = rxparse->s.pkt_lenm1 + 1;
	addr = (dma_addr_t *)(&rx_dr->rx_sg + 1);
	pkt = (void *)addr[0];

	debug("%s: segs: %d (%d@0x%llx, %d@0x%llx, %d@0x%llx)\n", __func__,
	      rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0],
	      rx_dr->rx_sg.s.seg2_size, addr[1],
	      rx_dr->rx_sg.s.seg3_size, addr[2]);
	if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size +
	    rx_dr->rx_sg.s.seg3_size) {
		debug("%s: Error: rx buffer size too small\n", __func__);
		return -1;
	}

	__iowmb();
#define DEBUG_PKT
#ifdef DEBUG_PKT
	debug("RX PKT Data\n");
	for (int i = 0; i < pkt_len; i++) {
		if (i && (i % 8 == 0))
			debug("\n");
		debug("%02x ", *((u8 *)pkt + i));
	}
	debug("\n");
#endif

	*packetp = (uchar *)pkt;

	return pkt_len;
}

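/**
 * nix_lf_setup_mac() - Sync the environment MAC address into the CGX LMAC
 *
 * @dev: RVU PF device
 * Return: 0
 */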
int nix_lf_setup_mac(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct eth_pdata *pdata = dev_get_plat(dev);

	/* If the lower-level firmware fails to set a proper MAC address,
	 * the U-Boot framework updates it to a random one. Use this hook
	 * to update the MAC address in the CGX LMAC and run the MAC filter
	 * setup for the new address.
	 */
	if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) {
		memcpy(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN);
		eth_env_set_enetaddr_by_index("eth", dev_seq(rvu->dev),
					      pdata->enetaddr);
		cgx_lmac_mac_filter_setup(nix->lmac);
		/* Push the user-provided MAC address to ATF so it lands in
		 * sh_fwdata for use by Linux.
		 */
		cgx_intf_set_macaddr(dev);
		debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr);
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
	}
	debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr);
	return 0;
}

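/**
 * nix_lf_halt() - Stop the LMAC and drain outstanding descriptors
 *
 * @dev: RVU PF device
 */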
void nix_lf_halt(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;

	cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false);

	mdelay(1);

	/* Flush tx and rx descriptors */
	nix_lf_flush_rx(dev);
	nix_lf_flush_tx(dev);
}

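/**
 * nix_lf_init() - Bring up the link for this NIX LF
 *
 * Enables the LMAC on first use (or polls link status on subsequent
 * calls), reports the link state and speed, and enables RX/TX when the
 * link is up.
 *
 * @dev: RVU PF device
 * Return: 0 if the link is up, -1 otherwise
 */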
int nix_lf_init(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;
	int ret;
	u64 link_sts;
	u8 link, speed;
	u16 errcode;

	printf("Waiting for CGX%d LMAC%d [%s] link status...",
	       lmac->cgx->cgx_id, lmac->lmac_id,
	       lmac_type_to_str[lmac->lmac_type]);

	if (lmac->init_pend) {
		/* Bring up LMAC */
		ret = cgx_lmac_link_enable(lmac, lmac->lmac_id,
					   true, &link_sts);
		lmac->init_pend = 0;
	} else {
		ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts);
	}

	if (ret) {
		printf(" [Down]\n");
		return -1;
	}

	link = link_sts & 0x1;
	speed = (link_sts >> 2) & 0xf;
	errcode = (link_sts >> 6) & 0x2ff;
	debug("%s: link %x speed %x errcode %x\n",
	      __func__, link, speed, errcode);

	/* Print link status */
	printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down");
	if (!link)
		return -1;

	if (!lmac->init_pend)
		cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);

	return 0;
}

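/**
 * nix_get_cgx_lmac_id() - Report the CGX and LMAC IDs backing this device
 *
 * @dev: RVU PF device
 * @cgxid: Set to the CGX instance ID
 * @lmacid: Set to the LMAC ID within that CGX
 */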
void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;

	*cgxid = lmac->cgx->cgx_id;
	*lmacid = lmac->lmac_id;
}

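/**
 * nix_print_mac_info() - Print the CGX/LMAC binding and LMAC type
 *
 * @dev: RVU PF device
 */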
void nix_print_mac_info(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;

	printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id,
	       lmac_type_to_str[lmac->lmac_type]);
}