Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /** |
| 3 | * ufs.c - Universal Flash Storage (UFS) driver |
| 4 | * |
| 5 | * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported |
| 6 | * to u-boot. |
| 7 | * |
| 8 | * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com |
| 9 | */ |
| 10 | |
Marek Vasut | 91913a1 | 2023-08-16 17:05:50 +0200 | [diff] [blame] | 11 | #include <bouncebuf.h> |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 12 | #include <charset.h> |
| 13 | #include <common.h> |
| 14 | #include <dm.h> |
Simon Glass | f7ae49f | 2020-05-10 11:40:05 -0600 | [diff] [blame] | 15 | #include <log.h> |
Simon Glass | 336d461 | 2020-02-03 07:36:16 -0700 | [diff] [blame] | 16 | #include <dm/device_compat.h> |
Simon Glass | 61b29b8 | 2020-02-03 07:36:15 -0700 | [diff] [blame] | 17 | #include <dm/devres.h> |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 18 | #include <dm/lists.h> |
| 19 | #include <dm/device-internal.h> |
| 20 | #include <malloc.h> |
| 21 | #include <hexdump.h> |
| 22 | #include <scsi.h> |
Simon Glass | 98eb4ce | 2020-07-19 10:15:54 -0600 | [diff] [blame] | 23 | #include <asm/io.h> |
| 24 | #include <asm/dma-mapping.h> |
Simon Glass | cd93d62 | 2020-05-10 11:40:13 -0600 | [diff] [blame] | 25 | #include <linux/bitops.h> |
Simon Glass | c05ed00 | 2020-05-10 11:40:11 -0600 | [diff] [blame] | 26 | #include <linux/delay.h> |
Masahiro Yamada | 9d86b89 | 2020-02-14 16:40:19 +0900 | [diff] [blame] | 27 | #include <linux/dma-mapping.h> |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 28 | |
| 29 | #include "ufs.h" |
| 30 | |
| 31 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ |
| 32 | UTP_TASK_REQ_COMPL |\ |
| 33 | UFSHCD_ERROR_MASK) |
| 34 | /* maximum number of link-startup retries */ |
| 35 | #define DME_LINKSTARTUP_RETRIES 3 |
| 36 | |
| 37 | /* maximum number of retries for a general UIC command */ |
| 38 | #define UFS_UIC_COMMAND_RETRIES 3 |
| 39 | |
| 40 | /* Query request retries */ |
| 41 | #define QUERY_REQ_RETRIES 3 |
| 42 | /* Query request timeout */ |
| 43 | #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ |
| 44 | |
| 45 | /* maximum timeout in ms for a general UIC command */ |
| 46 | #define UFS_UIC_CMD_TIMEOUT 1000 |
| 47 | /* NOP OUT retries waiting for NOP IN response */ |
| 48 | #define NOP_OUT_RETRIES 10 |
| 49 | /* Timeout after 30 msecs if NOP OUT hangs without response */ |
| 50 | #define NOP_OUT_TIMEOUT 30 /* msecs */ |
| 51 | |
| 52 | /* Only use one Task Tag for all requests */ |
| 53 | #define TASK_TAG 0 |
| 54 | |
| 55 | /* Expose the flag value from utp_upiu_query.value */ |
| 56 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF |
| 57 | |
| 58 | #define MAX_PRDT_ENTRY 262144 |
| 59 | |
| 60 | /* maximum bytes per request */ |
| 61 | #define UFS_MAX_BYTES (128 * 256 * 1024) |
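/*
 * A single PRDT entry describes at most MAX_PRDT_ENTRY (262144 = 256 KiB)
 * bytes, so UFS_MAX_BYTES (128 * 256 KiB = 32 MiB) corresponds to a request
 * scattered over up to 128 PRDT entries.
 */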
| 62 | |
Bhupesh Sharma | 1d09990 | 2023-08-11 12:02:29 +0530 | [diff] [blame] | 63 | #define ufshcd_hex_dump(prefix_str, buf, len) do { \ |
| 64 | size_t __len = (len); \ |
| 65 | print_hex_dump(prefix_str, \ |
| 66 | DUMP_PREFIX_OFFSET, \ |
| 67 | 16, 4, buf, __len, false); \ |
| 68 | } while (0) |
| 69 | |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 70 | static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); |
| 71 | static inline void ufshcd_hba_stop(struct ufs_hba *hba); |
| 72 | static int ufshcd_hba_enable(struct ufs_hba *hba); |
| 73 | |
Bhupesh Sharma | 1d09990 | 2023-08-11 12:02:29 +0530 | [diff] [blame] | 74 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, |
| 75 | const char *prefix) |
| 76 | { |
| 77 | u32 *regs; |
| 78 | size_t pos; |
| 79 | |
| 80 | if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ |
| 81 | return -EINVAL; |
| 82 | |
| 83 | regs = kzalloc(len, GFP_KERNEL); |
| 84 | if (!regs) |
| 85 | return -ENOMEM; |
| 86 | |
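	/*
	 * Skip the UIC error-code registers when dumping the standard
	 * register file (offset 0): those registers are cleared on read,
	 * so including them here would silently discard pending error
	 * information.
	 */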
| 87 | for (pos = 0; pos < len; pos += 4) { |
| 88 | if (offset == 0 && |
| 89 | pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && |
| 90 | pos <= REG_UIC_ERROR_CODE_DME) |
| 91 | continue; |
| 92 | regs[pos / 4] = ufshcd_readl(hba, offset + pos); |
| 93 | } |
| 94 | |
| 95 | ufshcd_hex_dump(prefix, regs, len); |
| 96 | kfree(regs); |
| 97 | |
| 98 | return 0; |
| 99 | } |
| 100 | |
| 101 | void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt) |
| 102 | { |
| 103 | int prdt_length; |
| 104 | struct utp_transfer_req_desc *req_desc = hba->utrdl; |
| 105 | |
| 106 | dev_info(hba->dev, |
| 107 | "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", |
| 108 | tag, (u64)hba->utrdl); |
| 109 | |
| 110 | ufshcd_hex_dump("UPIU TRD: ", hba->utrdl, |
| 111 | sizeof(struct utp_transfer_req_desc)); |
| 112 | dev_info(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, |
| 113 | (u64)hba->ucd_req_ptr); |
| 114 | ufshcd_hex_dump("UPIU REQ: ", hba->ucd_req_ptr, |
| 115 | sizeof(struct utp_upiu_req)); |
| 116 | dev_info(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, |
| 117 | (u64)hba->ucd_rsp_ptr); |
| 118 | ufshcd_hex_dump("UPIU RSP: ", hba->ucd_rsp_ptr, |
| 119 | sizeof(struct utp_upiu_rsp)); |
| 120 | |
| 121 | prdt_length = le16_to_cpu(req_desc->prd_table_length); |
| 122 | |
| 123 | dev_info(hba->dev, |
| 124 | "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", |
| 125 | tag, prdt_length, |
| 126 | (u64)hba->ucd_prdt_ptr); |
| 127 | |
| 128 | if (pr_prdt) |
| 129 | ufshcd_hex_dump("UPIU PRDT: ", hba->ucd_prdt_ptr, |
| 130 | sizeof(struct ufshcd_sg_entry) * prdt_length); |
| 131 | } |
| 132 | |
| 133 | void ufshcd_dbg_register_dump(struct ufs_hba *hba) |
| 134 | { |
| 135 | ufshcd_ops_dbg_register_dump(hba); |
| 136 | } |
| 137 | |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 138 | /* |
| 139 | * ufshcd_wait_for_register - wait for a register to reach the expected masked value |
| 140 | */ |
| 141 | static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, |
| 142 | u32 val, unsigned long timeout_ms) |
| 143 | { |
| 144 | int err = 0; |
| 145 | unsigned long start = get_timer(0); |
| 146 | |
| 147 | /* ignore bits that we don't intend to wait on */ |
| 148 | val = val & mask; |
| 149 | |
| 150 | while ((ufshcd_readl(hba, reg) & mask) != val) { |
| 151 | if (get_timer(start) > timeout_ms) { |
| 152 | if ((ufshcd_readl(hba, reg) & mask) != val) |
| 153 | err = -ETIMEDOUT; |
| 154 | break; |
| 155 | } |
| 156 | } |
| 157 | |
| 158 | return err; |
| 159 | } |
| 160 | |
| 161 | /** |
| 162 | * ufshcd_init_pwr_info - setting the POR (power on reset) |
| 163 | * values in hba power info |
| 164 | */ |
| 165 | static void ufshcd_init_pwr_info(struct ufs_hba *hba) |
| 166 | { |
| 167 | hba->pwr_info.gear_rx = UFS_PWM_G1; |
| 168 | hba->pwr_info.gear_tx = UFS_PWM_G1; |
| 169 | hba->pwr_info.lane_rx = 1; |
| 170 | hba->pwr_info.lane_tx = 1; |
| 171 | hba->pwr_info.pwr_rx = SLOWAUTO_MODE; |
| 172 | hba->pwr_info.pwr_tx = SLOWAUTO_MODE; |
| 173 | hba->pwr_info.hs_rate = 0; |
| 174 | } |
| 175 | |
| 176 | /** |
| 177 | * ufshcd_print_pwr_info - print power params as saved in hba |
| 178 | * power info |
| 179 | */ |
| 180 | static void ufshcd_print_pwr_info(struct ufs_hba *hba) |
| 181 | { |
| 182 | static const char * const names[] = { |
| 183 | "INVALID MODE", |
| 184 | "FAST MODE", |
| 185 | "SLOW_MODE", |
| 186 | "INVALID MODE", |
| 187 | "FASTAUTO_MODE", |
| 188 | "SLOWAUTO_MODE", |
| 189 | "INVALID MODE", |
| 190 | }; |
| 191 | |
| 192 | dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", |
| 193 | hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, |
| 194 | hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, |
| 195 | names[hba->pwr_info.pwr_rx], |
| 196 | names[hba->pwr_info.pwr_tx], |
| 197 | hba->pwr_info.hs_rate); |
| 198 | } |
| 199 | |
| 200 | /** |
| 201 | * ufshcd_ready_for_uic_cmd - Check if controller is ready |
| 202 | * to accept UIC commands |
| 203 | */ |
| 204 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) |
| 205 | { |
| 206 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) |
| 207 | return true; |
| 208 | else |
| 209 | return false; |
| 210 | } |
| 211 | |
| 212 | /** |
| 213 | * ufshcd_get_uic_cmd_result - Get the UIC command result |
| 214 | */ |
| 215 | static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) |
| 216 | { |
| 217 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & |
| 218 | MASK_UIC_COMMAND_RESULT; |
| 219 | } |
| 220 | |
| 221 | /** |
| 222 | * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command |
| 223 | */ |
| 224 | static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) |
| 225 | { |
| 226 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); |
| 227 | } |
| 228 | |
| 229 | /** |
| 230 | * ufshcd_is_device_present - Check if any device is connected to |
| 231 | * the host controller |
| 232 | */ |
| 233 | static inline bool ufshcd_is_device_present(struct ufs_hba *hba) |
| 234 | { |
| 235 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & |
| 236 | DEVICE_PRESENT) ? true : false; |
| 237 | } |
| 238 | |
| 239 | /** |
| 240 | * ufshcd_send_uic_cmd - UFS Interconnect layer command API |
| 241 | * |
| 242 | */ |
| 243 | static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
| 244 | { |
| 245 | unsigned long start = 0; |
| 246 | u32 intr_status; |
| 247 | u32 enabled_intr_status; |
| 248 | |
| 249 | if (!ufshcd_ready_for_uic_cmd(hba)) { |
| 250 | dev_err(hba->dev, |
| 251 | "Controller not ready to accept UIC commands\n"); |
| 252 | return -EIO; |
| 253 | } |
| 254 | |
| 255 | debug("sending uic command:%d\n", uic_cmd->command); |
| 256 | |
| 257 | /* Write Args */ |
| 258 | ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); |
| 259 | ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); |
| 260 | ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); |
| 261 | |
| 262 | /* Write UIC Cmd */ |
| 263 | ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, |
| 264 | REG_UIC_COMMAND); |
| 265 | |
| 266 | start = get_timer(0); |
| 267 | do { |
| 268 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); |
| 269 | enabled_intr_status = intr_status & hba->intr_mask; |
| 270 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); |
| 271 | |
| 272 | if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { |
| 273 | dev_err(hba->dev, |
| 274 | "Timedout waiting for UIC response\n"); |
| 275 | |
| 276 | return -ETIMEDOUT; |
| 277 | } |
| 278 | |
| 279 | if (enabled_intr_status & UFSHCD_ERROR_MASK) { |
| 280 | dev_err(hba->dev, "Error in status:%08x\n", |
| 281 | enabled_intr_status); |
| 282 | |
| 283 | return -1; |
| 284 | } |
| 285 | } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); |
| 286 | |
| 287 | uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); |
| 288 | uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); |
| 289 | |
| 290 | debug("Sent successfully\n"); |
| 291 | |
| 292 | return 0; |
| 293 | } |
| 294 | |
| 295 | /** |
| 296 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET |
| 297 | * |
| 298 | */ |
| 299 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, |
| 300 | u32 mib_val, u8 peer) |
| 301 | { |
| 302 | struct uic_command uic_cmd = {0}; |
| 303 | static const char *const action[] = { |
| 304 | "dme-set", |
| 305 | "dme-peer-set" |
| 306 | }; |
| 307 | const char *set = action[!!peer]; |
| 308 | int ret; |
| 309 | int retries = UFS_UIC_COMMAND_RETRIES; |
| 310 | |
| 311 | uic_cmd.command = peer ? |
| 312 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; |
| 313 | uic_cmd.argument1 = attr_sel; |
| 314 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); |
| 315 | uic_cmd.argument3 = mib_val; |
| 316 | |
| 317 | do { |
| 318 | /* for peer attributes we retry upon failure */ |
| 319 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
| 320 | if (ret) |
| 321 | dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", |
| 322 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); |
| 323 | } while (ret && peer && --retries); |
| 324 | |
| 325 | if (ret) |
| 326 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", |
| 327 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, |
| 328 | UFS_UIC_COMMAND_RETRIES - retries); |
| 329 | |
| 330 | return ret; |
| 331 | } |
| 332 | |
| 333 | /** |
| 334 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET |
| 335 | * |
| 336 | */ |
| 337 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, |
| 338 | u32 *mib_val, u8 peer) |
| 339 | { |
| 340 | struct uic_command uic_cmd = {0}; |
| 341 | static const char *const action[] = { |
| 342 | "dme-get", |
| 343 | "dme-peer-get" |
| 344 | }; |
| 345 | const char *get = action[!!peer]; |
| 346 | int ret; |
| 347 | int retries = UFS_UIC_COMMAND_RETRIES; |
| 348 | |
| 349 | uic_cmd.command = peer ? |
| 350 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; |
| 351 | uic_cmd.argument1 = attr_sel; |
| 352 | |
| 353 | do { |
| 354 | /* for peer attributes we retry upon failure */ |
| 355 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
| 356 | if (ret) |
| 357 | dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", |
| 358 | get, UIC_GET_ATTR_ID(attr_sel), ret); |
| 359 | } while (ret && peer && --retries); |
| 360 | |
| 361 | if (ret) |
| 362 | dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", |
| 363 | get, UIC_GET_ATTR_ID(attr_sel), |
| 364 | UFS_UIC_COMMAND_RETRIES - retries); |
| 365 | |
| 366 | if (mib_val && !ret) |
| 367 | *mib_val = uic_cmd.argument3; |
| 368 | |
| 369 | return ret; |
| 370 | } |
| 371 | |
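/**
 * ufshcd_disable_tx_lcc - disable TX LCC (Line Control Command) generation
 *
 * Reads the number of connected TX data lanes and clears TX_LCC_ENABLE on
 * each of them, either on the host side or, when @peer is set, on the
 * device (peer) side.
 */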
| 372 | static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) |
| 373 | { |
| 374 | u32 tx_lanes, i, err = 0; |
| 375 | |
| 376 | if (!peer) |
| 377 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), |
| 378 | &tx_lanes); |
| 379 | else |
| 380 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), |
| 381 | &tx_lanes); |
| 382 | for (i = 0; i < tx_lanes; i++) { |
| 383 | if (!peer) |
| 384 | err = ufshcd_dme_set(hba, |
| 385 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, |
| 386 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), |
| 387 | 0); |
| 388 | else |
| 389 | err = ufshcd_dme_peer_set(hba, |
| 390 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, |
| 391 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), |
| 392 | 0); |
| 393 | if (err) { |
| 394 | dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", |
| 395 | __func__, peer, i, err); |
| 396 | break; |
| 397 | } |
| 398 | } |
| 399 | |
| 400 | return err; |
| 401 | } |
| 402 | |
| 403 | static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) |
| 404 | { |
| 405 | return ufshcd_disable_tx_lcc(hba, true); |
| 406 | } |
| 407 | |
| 408 | /** |
| 409 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup |
| 410 | * |
| 411 | */ |
| 412 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) |
| 413 | { |
| 414 | struct uic_command uic_cmd = {0}; |
| 415 | int ret; |
| 416 | |
| 417 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; |
| 418 | |
| 419 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
| 420 | if (ret) |
| 421 | dev_dbg(hba->dev, |
| 422 | "dme-link-startup: error code %d\n", ret); |
| 423 | return ret; |
| 424 | } |
| 425 | |
| 426 | /** |
| 427 | * ufshcd_disable_intr_aggr - Disables interrupt aggregation. |
| 428 | * |
| 429 | */ |
| 430 | static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) |
| 431 | { |
| 432 | ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); |
| 433 | } |
| 434 | |
| 435 | /** |
| 436 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY |
| 437 | */ |
| 438 | static inline int ufshcd_get_lists_status(u32 reg) |
| 439 | { |
| 440 | return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); |
| 441 | } |
| 442 | |
| 443 | /** |
| 444 | * ufshcd_enable_run_stop_reg - Enable run-stop registers. |
| 445 | * When the run-stop registers are set to 1, they indicate to the |
| 446 | * host controller that it can process requests. |
| 447 | */ |
| 448 | static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) |
| 449 | { |
| 450 | ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, |
| 451 | REG_UTP_TASK_REQ_LIST_RUN_STOP); |
| 452 | ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, |
| 453 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); |
| 454 | } |
| 455 | |
| 456 | /** |
| 457 | * ufshcd_enable_intr - enable interrupts |
| 458 | */ |
| 459 | static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) |
| 460 | { |
| 461 | u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); |
| 462 | u32 rw; |
| 463 | |
| 464 | if (hba->version == UFSHCI_VERSION_10) { |
| 465 | rw = set & INTERRUPT_MASK_RW_VER_10; |
| 466 | set = rw | ((set ^ intrs) & intrs); |
| 467 | } else { |
| 468 | set |= intrs; |
| 469 | } |
| 470 | |
| 471 | ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); |
| 472 | |
| 473 | hba->intr_mask = set; |
| 474 | } |
| 475 | |
| 476 | /** |
| 477 | * ufshcd_make_hba_operational - Make UFS controller operational |
| 478 | * |
| 479 | * To bring UFS host controller to operational state, |
| 480 | * 1. Enable required interrupts |
| 481 | * 2. Configure interrupt aggregation |
| 482 | * 3. Program UTRL and UTMRL base address |
| 483 | * 4. Configure run-stop-registers |
| 484 | * |
| 485 | */ |
| 486 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) |
| 487 | { |
| 488 | int err = 0; |
| 489 | u32 reg; |
| 490 | |
| 491 | /* Enable required interrupts */ |
| 492 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); |
| 493 | |
| 494 | /* Disable interrupt aggregation */ |
| 495 | ufshcd_disable_intr_aggr(hba); |
| 496 | |
| 497 | /* Configure UTRL and UTMRL base address registers */ |
| 498 | ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), |
| 499 | REG_UTP_TRANSFER_REQ_LIST_BASE_L); |
| 500 | ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), |
| 501 | REG_UTP_TRANSFER_REQ_LIST_BASE_H); |
| 502 | ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), |
| 503 | REG_UTP_TASK_REQ_LIST_BASE_L); |
| 504 | ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), |
| 505 | REG_UTP_TASK_REQ_LIST_BASE_H); |
| 506 | |
| 507 | /* |
Bhupesh Sharma | 2eb2a1e | 2023-08-14 11:58:20 +0530 | [diff] [blame] | 508 | * Make sure base address and interrupt setup are updated before |
| 509 | * enabling the run/stop registers below. |
| 510 | */ |
| 511 | wmb(); |
| 512 | |
| 513 | /* |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 514 | * UCRDY, UTMRLRDY and UTRLRDY bits must be 1 |
| 515 | */ |
| 516 | reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); |
| 517 | if (!(ufshcd_get_lists_status(reg))) { |
| 518 | ufshcd_enable_run_stop_reg(hba); |
| 519 | } else { |
| 520 | dev_err(hba->dev, |
| 521 | "Host controller not ready to process requests"); |
| 522 | err = -EIO; |
| 523 | goto out; |
| 524 | } |
| 525 | |
| 526 | out: |
| 527 | return err; |
| 528 | } |
| 529 | |
| 530 | /** |
| 531 | * ufshcd_link_startup - Initialize unipro link startup |
| 532 | */ |
| 533 | static int ufshcd_link_startup(struct ufs_hba *hba) |
| 534 | { |
| 535 | int ret; |
| 536 | int retries = DME_LINKSTARTUP_RETRIES; |
| 537 | bool link_startup_again = true; |
| 538 | |
| 539 | link_startup: |
| 540 | do { |
| 541 | ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); |
| 542 | |
| 543 | ret = ufshcd_dme_link_startup(hba); |
| 544 | |
| 545 | /* check if device is detected by inter-connect layer */ |
| 546 | if (!ret && !ufshcd_is_device_present(hba)) { |
| 547 | dev_err(hba->dev, "%s: Device not present\n", __func__); |
| 548 | ret = -ENXIO; |
| 549 | goto out; |
| 550 | } |
| 551 | |
| 552 | /* |
| 553 | * DME link lost indication is only received when link is up, |
| 554 | * but we can't be sure if the link is up until link startup |
| 555 | * succeeds. So reset the local Uni-Pro and try again. |
| 556 | */ |
| 557 | if (ret && ufshcd_hba_enable(hba)) |
| 558 | goto out; |
| 559 | } while (ret && retries--); |
| 560 | |
| 561 | if (ret) |
| 562 | /* failed to get the link up... retire */ |
| 563 | goto out; |
| 564 | |
| 565 | if (link_startup_again) { |
| 566 | link_startup_again = false; |
| 567 | retries = DME_LINKSTARTUP_RETRIES; |
| 568 | goto link_startup; |
| 569 | } |
| 570 | |
| 571 | /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ |
| 572 | ufshcd_init_pwr_info(hba); |
| 573 | |
| 574 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { |
| 575 | ret = ufshcd_disable_device_tx_lcc(hba); |
| 576 | if (ret) |
| 577 | goto out; |
| 578 | } |
| 579 | |
| 580 | /* Include any host controller configuration via UIC commands */ |
| 581 | ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE); |
| 582 | if (ret) |
| 583 | goto out; |
| 584 | |
Bhupesh Sharma | 34e0255 | 2023-08-14 11:33:41 +0530 | [diff] [blame] | 585 | /* Clear UECPA once, as a LINERESET may have happened during LINK_STARTUP */ |
| 586 | ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 587 | ret = ufshcd_make_hba_operational(hba); |
| 588 | out: |
| 589 | if (ret) |
| 590 | dev_err(hba->dev, "link startup failed %d\n", ret); |
| 591 | |
| 592 | return ret; |
| 593 | } |
| 594 | |
| 595 | /** |
| 596 | * ufshcd_hba_stop - Send controller to reset state |
| 597 | */ |
| 598 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) |
| 599 | { |
| 600 | int err; |
| 601 | |
| 602 | ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); |
| 603 | err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, |
| 604 | CONTROLLER_ENABLE, CONTROLLER_DISABLE, |
| 605 | 10); |
| 606 | if (err) |
| 607 | dev_err(hba->dev, "%s: Controller disable failed\n", __func__); |
| 608 | } |
| 609 | |
| 610 | /** |
| 611 | * ufshcd_is_hba_active - Get controller state |
| 612 | */ |
| 613 | static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) |
| 614 | { |
| 615 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) |
| 616 | ? false : true; |
| 617 | } |
| 618 | |
| 619 | /** |
| 620 | * ufshcd_hba_start - Start controller initialization sequence |
| 621 | */ |
| 622 | static inline void ufshcd_hba_start(struct ufs_hba *hba) |
| 623 | { |
| 624 | ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); |
| 625 | } |
| 626 | |
| 627 | /** |
| 628 | * ufshcd_hba_enable - initialize the controller |
| 629 | */ |
| 630 | static int ufshcd_hba_enable(struct ufs_hba *hba) |
| 631 | { |
| 632 | int retry; |
| 633 | |
| 634 | if (!ufshcd_is_hba_active(hba)) |
| 635 | /* change controller state to "reset state" */ |
| 636 | ufshcd_hba_stop(hba); |
| 637 | |
| 638 | ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE); |
| 639 | |
| 640 | /* start controller initialization sequence */ |
| 641 | ufshcd_hba_start(hba); |
| 642 | |
| 643 | /* |
| 644 | * To initialize a UFS host controller, the HCE bit must be set to 1. |
| 645 | * During initialization the HCE bit value changes from 1->0->1. |
| 646 | * When the host controller completes initialization sequence |
| 647 | * it sets the value of HCE bit to 1. The same HCE bit is read back |
| 648 | * to check if the controller has completed initialization sequence. |
| 649 | * So without this delay, the HCE = 1 value set by the previous |
| 650 | * instruction might be read back before the controller clears it. |
| 651 | * This delay can be changed based on the controller. |
| 652 | */ |
| 653 | mdelay(1); |
| 654 | |
| 655 | /* wait for the host controller to complete initialization */ |
| 656 | retry = 10; |
| 657 | while (ufshcd_is_hba_active(hba)) { |
| 658 | if (retry) { |
| 659 | retry--; |
| 660 | } else { |
| 661 | dev_err(hba->dev, "Controller enable failed\n"); |
| 662 | return -EIO; |
| 663 | } |
| 664 | mdelay(5); |
| 665 | } |
| 666 | |
| 667 | /* enable UIC related interrupts */ |
| 668 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); |
| 669 | |
| 670 | ufshcd_ops_hce_enable_notify(hba, POST_CHANGE); |
| 671 | |
| 672 | return 0; |
| 673 | } |
| 674 | |
| 675 | /** |
| 676 | * ufshcd_host_memory_configure - configure local reference block with |
| 677 | * memory offsets |
| 678 | */ |
| 679 | static void ufshcd_host_memory_configure(struct ufs_hba *hba) |
| 680 | { |
| 681 | struct utp_transfer_req_desc *utrdlp; |
| 682 | dma_addr_t cmd_desc_dma_addr; |
| 683 | u16 response_offset; |
| 684 | u16 prdt_offset; |
| 685 | |
| 686 | utrdlp = hba->utrdl; |
| 687 | cmd_desc_dma_addr = (dma_addr_t)hba->ucdl; |
| 688 | |
| 689 | utrdlp->command_desc_base_addr_lo = |
| 690 | cpu_to_le32(lower_32_bits(cmd_desc_dma_addr)); |
| 691 | utrdlp->command_desc_base_addr_hi = |
| 692 | cpu_to_le32(upper_32_bits(cmd_desc_dma_addr)); |
| 693 | |
| 694 | response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); |
| 695 | prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); |
| 696 | |
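	/*
	 * The response UPIU offset, PRDT offset and response UPIU length in
	 * the request descriptor are expressed in 32-bit (dword) units,
	 * hence the conversion from byte values with ">> 2".
	 */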
| 697 | utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2); |
| 698 | utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2); |
| 699 | utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); |
| 700 | |
| 701 | hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl; |
| 702 | hba->ucd_rsp_ptr = |
| 703 | (struct utp_upiu_rsp *)&hba->ucdl->response_upiu; |
| 704 | hba->ucd_prdt_ptr = |
| 705 | (struct ufshcd_sg_entry *)&hba->ucdl->prd_table; |
| 706 | } |
| 707 | |
| 708 | /** |
| 709 | * ufshcd_memory_alloc - allocate memory for host memory space data structures |
| 710 | */ |
| 711 | static int ufshcd_memory_alloc(struct ufs_hba *hba) |
| 712 | { |
| 713 | /* Allocate one Transfer Request Descriptor. |
| 714 | * It should be aligned to a 1k boundary. |
| 715 | */ |
| 716 | hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc)); |
| 717 | if (!hba->utrdl) { |
| 718 | dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n"); |
| 719 | return -ENOMEM; |
| 720 | } |
| 721 | |
| 722 | /* Allocate one Command Descriptor. |
| 723 | * It should be aligned to a 1k boundary. |
| 724 | */ |
| 725 | hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc)); |
| 726 | if (!hba->ucdl) { |
| 727 | dev_err(hba->dev, "Command descriptor memory allocation failed\n"); |
| 728 | return -ENOMEM; |
| 729 | } |
| 730 | |
| 731 | return 0; |
| 732 | } |
| 733 | |
| 734 | /** |
| 735 | * ufshcd_get_intr_mask - Get the interrupt bit mask |
| 736 | */ |
| 737 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) |
| 738 | { |
| 739 | u32 intr_mask = 0; |
| 740 | |
| 741 | switch (hba->version) { |
| 742 | case UFSHCI_VERSION_10: |
| 743 | intr_mask = INTERRUPT_MASK_ALL_VER_10; |
| 744 | break; |
| 745 | case UFSHCI_VERSION_11: |
| 746 | case UFSHCI_VERSION_20: |
| 747 | intr_mask = INTERRUPT_MASK_ALL_VER_11; |
| 748 | break; |
| 749 | case UFSHCI_VERSION_21: |
| 750 | default: |
| 751 | intr_mask = INTERRUPT_MASK_ALL_VER_21; |
| 752 | break; |
| 753 | } |
| 754 | |
| 755 | return intr_mask; |
| 756 | } |
| 757 | |
| 758 | /** |
| 759 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA |
| 760 | */ |
| 761 | static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) |
| 762 | { |
| 763 | return ufshcd_readl(hba, REG_UFS_VERSION); |
| 764 | } |
| 765 | |
| 766 | /** |
| 767 | * ufshcd_get_upmcrs - Get the power mode change request status |
| 768 | */ |
| 769 | static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) |
| 770 | { |
| 771 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; |
| 772 | } |
| 773 | |
| 774 | /** |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 775 | * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache |
| 776 | * |
| 777 | * Flush and invalidate cache in aligned address..address+size range. |
| 778 | * The invalidation is in place to avoid stale data in cache. |
| 779 | */ |
| 780 | static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size) |
| 781 | { |
| 782 | uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1); |
| 783 | unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN); |
| 784 | |
| 785 | flush_dcache_range(aaddr, aaddr + asize); |
| 786 | invalidate_dcache_range(aaddr, aaddr + asize); |
| 787 | } |
| 788 | |
| 789 | /** |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 790 | * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header |
| 791 | * according to the request's data direction |
| 792 | */ |
Marek Vasut | 7f26fcb | 2023-08-16 17:05:53 +0200 | [diff] [blame] | 793 | static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 794 | u32 *upiu_flags, |
| 795 | enum dma_data_direction cmd_dir) |
| 796 | { |
Marek Vasut | 7f26fcb | 2023-08-16 17:05:53 +0200 | [diff] [blame] | 797 | struct utp_transfer_req_desc *req_desc = hba->utrdl; |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 798 | u32 data_direction; |
| 799 | u32 dword_0; |
| 800 | |
| 801 | if (cmd_dir == DMA_FROM_DEVICE) { |
| 802 | data_direction = UTP_DEVICE_TO_HOST; |
| 803 | *upiu_flags = UPIU_CMD_FLAGS_READ; |
| 804 | } else if (cmd_dir == DMA_TO_DEVICE) { |
| 805 | data_direction = UTP_HOST_TO_DEVICE; |
| 806 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; |
| 807 | } else { |
| 808 | data_direction = UTP_NO_DATA_TRANSFER; |
| 809 | *upiu_flags = UPIU_CMD_FLAGS_NONE; |
| 810 | } |
| 811 | |
| 812 | dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET); |
| 813 | |
| 814 | /* Enable Interrupt for command */ |
| 815 | dword_0 |= UTP_REQ_DESC_INT_CMD; |
| 816 | |
| 817 | /* Transfer request descriptor header fields */ |
| 818 | req_desc->header.dword_0 = cpu_to_le32(dword_0); |
| 819 | /* dword_1 is reserved, hence it is set to 0 */ |
| 820 | req_desc->header.dword_1 = 0; |
| 821 | /* |
| 822 | * assigning invalid value for command status. Controller |
| 823 | * updates OCS on command completion, with the command |
| 824 | * status |
| 825 | */ |
| 826 | req_desc->header.dword_2 = |
| 827 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); |
| 828 | /* dword_3 is reserved, hence it is set to 0 */ |
| 829 | req_desc->header.dword_3 = 0; |
| 830 | |
| 831 | req_desc->prd_table_length = 0; |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 832 | |
| 833 | ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 834 | } |
| 835 | |
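/**
 * ufshcd_prepare_utp_query_req_upiu - build a Query Request UPIU
 *
 * Fills the Query Request UPIU from hba->dev_cmd.query. For WRITE_DESC
 * requests the descriptor payload is copied into the data segment right
 * after the UPIU and the data segment length field is set accordingly.
 */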
| 836 | static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, |
| 837 | u32 upiu_flags) |
| 838 | { |
| 839 | struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; |
| 840 | struct ufs_query *query = &hba->dev_cmd.query; |
| 841 | u16 len = be16_to_cpu(query->request.upiu_req.length); |
| 842 | |
| 843 | /* Query request header */ |
| 844 | ucd_req_ptr->header.dword_0 = |
| 845 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ, |
| 846 | upiu_flags, 0, TASK_TAG); |
| 847 | ucd_req_ptr->header.dword_1 = |
| 848 | UPIU_HEADER_DWORD(0, query->request.query_func, |
| 849 | 0, 0); |
| 850 | |
| 851 | /* Data segment length is only needed for WRITE_DESC */ |
| 852 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) |
| 853 | ucd_req_ptr->header.dword_2 = |
| 854 | UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); |
| 855 | else |
| 856 | ucd_req_ptr->header.dword_2 = 0; |
| 857 | |
| 858 | /* Copy the Query Request buffer as is */ |
| 859 | memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE); |
| 860 | |
| 861 | /* Copy the Descriptor */ |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 862 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) { |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 863 | memcpy(ucd_req_ptr + 1, query->descriptor, len); |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 864 | ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr)); |
| 865 | } else { |
| 866 | ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr)); |
| 867 | } |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 868 | |
| 869 | memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 870 | ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 871 | } |
| 872 | |
| 873 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba) |
| 874 | { |
| 875 | struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; |
| 876 | |
| 877 | memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); |
| 878 | |
| 879 | /* command descriptor fields */ |
| 880 | ucd_req_ptr->header.dword_0 = |
Bhupesh Sharma | 820801e | 2023-07-03 00:39:12 +0530 | [diff] [blame] | 881 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 882 | /* clear rest of the fields of basic header */ |
| 883 | ucd_req_ptr->header.dword_1 = 0; |
| 884 | ucd_req_ptr->header.dword_2 = 0; |
| 885 | |
| 886 | memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 887 | |
| 888 | ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr)); |
| 889 | ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 890 | } |
| 891 | |
| 892 | /** |
| 893 | * ufshcd_comp_devman_upiu - compose a UFS Protocol Information Unit (UPIU) |
| 894 | * for device management purposes |
| 895 | */ |
| 896 | static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, |
| 897 | enum dev_cmd_type cmd_type) |
| 898 | { |
| 899 | u32 upiu_flags; |
| 900 | int ret = 0; |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 901 | |
| 902 | hba->dev_cmd.type = cmd_type; |
| 903 | |
Marek Vasut | 7f26fcb | 2023-08-16 17:05:53 +0200 | [diff] [blame] | 904 | ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 905 | switch (cmd_type) { |
| 906 | case DEV_CMD_TYPE_QUERY: |
| 907 | ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags); |
| 908 | break; |
| 909 | case DEV_CMD_TYPE_NOP: |
| 910 | ufshcd_prepare_utp_nop_upiu(hba); |
| 911 | break; |
| 912 | default: |
| 913 | ret = -EINVAL; |
| 914 | } |
| 915 | |
| 916 | return ret; |
| 917 | } |
| 918 | |
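/**
 * ufshcd_send_command - issue the request in slot @task_tag and wait for it
 *
 * Rings the doorbell for @task_tag, then polls the interrupt status register
 * until the transfer request completes, an error interrupt is raised, or
 * QUERY_REQ_TIMEOUT elapses.
 */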
| 919 | static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) |
| 920 | { |
| 921 | unsigned long start; |
| 922 | u32 intr_status; |
| 923 | u32 enabled_intr_status; |
| 924 | |
| 925 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
| 926 | |
Bhupesh Sharma | 2eb2a1e | 2023-08-14 11:58:20 +0530 | [diff] [blame] | 927 | /* Make sure doorbell reg is updated before reading interrupt status */ |
| 928 | wmb(); |
| 929 | |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 930 | start = get_timer(0); |
| 931 | do { |
| 932 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); |
| 933 | enabled_intr_status = intr_status & hba->intr_mask; |
| 934 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); |
| 935 | |
| 936 | if (get_timer(start) > QUERY_REQ_TIMEOUT) { |
| 937 | dev_err(hba->dev, |
| 938 | "Timedout waiting for UTP response\n"); |
| 939 | |
| 940 | return -ETIMEDOUT; |
| 941 | } |
| 942 | |
| 943 | if (enabled_intr_status & UFSHCD_ERROR_MASK) { |
| 944 | dev_err(hba->dev, "Error in status:%08x\n", |
| 945 | enabled_intr_status); |
| 946 | |
| 947 | return -1; |
| 948 | } |
| 949 | } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL)); |
| 950 | |
| 951 | return 0; |
| 952 | } |
| 953 | |
| 954 | /** |
| 955 | * ufshcd_get_req_rsp - returns the TR response transaction type |
| 956 | */ |
| 957 | static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) |
| 958 | { |
| 959 | return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; |
| 960 | } |
| 961 | |
| 962 | /** |
| 963 | * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status |
| 964 | * |
| 965 | */ |
| 966 | static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba) |
| 967 | { |
Marek Vasut | 12675cb | 2023-08-16 17:05:54 +0200 | [diff] [blame] | 968 | struct utp_transfer_req_desc *req_desc = hba->utrdl; |
| 969 | |
| 970 | return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS; |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 971 | } |
| 972 | |
| 973 | static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) |
| 974 | { |
| 975 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; |
| 976 | } |
| 977 | |
| 978 | static int ufshcd_check_query_response(struct ufs_hba *hba) |
| 979 | { |
| 980 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; |
| 981 | |
| 982 | /* Get the UPIU response */ |
| 983 | query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >> |
| 984 | UPIU_RSP_CODE_OFFSET; |
| 985 | return query_res->response; |
| 986 | } |
| 987 | |
| 988 | /** |
| 989 | * ufshcd_copy_query_response() - Copy the Query Response and the data |
| 990 | * descriptor |
| 991 | */ |
| 992 | static int ufshcd_copy_query_response(struct ufs_hba *hba) |
| 993 | { |
| 994 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; |
| 995 | |
| 996 | memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
| 997 | |
| 998 | /* Get the descriptor */ |
| 999 | if (hba->dev_cmd.query.descriptor && |
| 1000 | hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { |
| 1001 | u8 *descp = (u8 *)hba->ucd_rsp_ptr + |
| 1002 | GENERAL_UPIU_REQUEST_SIZE; |
| 1003 | u16 resp_len; |
| 1004 | u16 buf_len; |
| 1005 | |
| 1006 | /* data segment length */ |
| 1007 | resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) & |
| 1008 | MASK_QUERY_DATA_SEG_LEN; |
| 1009 | buf_len = |
| 1010 | be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length); |
| 1011 | if (likely(buf_len >= resp_len)) { |
| 1012 | memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); |
| 1013 | } else { |
| 1014 | dev_warn(hba->dev, |
| 1015 | "%s: Response size is bigger than buffer", |
| 1016 | __func__); |
| 1017 | return -EINVAL; |
| 1018 | } |
| 1019 | } |
| 1020 | |
| 1021 | return 0; |
| 1022 | } |
| 1023 | |
| 1024 | /** |
| 1025 | * ufshcd_exec_dev_cmd - API for sending device management requests |
| 1026 | */ |
| 1027 | static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, |
| 1028 | int timeout) |
| 1029 | { |
| 1030 | int err; |
| 1031 | int resp; |
| 1032 | |
| 1033 | err = ufshcd_comp_devman_upiu(hba, cmd_type); |
| 1034 | if (err) |
| 1035 | return err; |
| 1036 | |
| 1037 | err = ufshcd_send_command(hba, TASK_TAG); |
| 1038 | if (err) |
| 1039 | return err; |
| 1040 | |
| 1041 | err = ufshcd_get_tr_ocs(hba); |
| 1042 | if (err) { |
| 1043 | dev_err(hba->dev, "Error in OCS:%d\n", err); |
| 1044 | return -EINVAL; |
| 1045 | } |
| 1046 | |
| 1047 | resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); |
| 1048 | switch (resp) { |
| 1049 | case UPIU_TRANSACTION_NOP_IN: |
| 1050 | break; |
| 1051 | case UPIU_TRANSACTION_QUERY_RSP: |
| 1052 | err = ufshcd_check_query_response(hba); |
| 1053 | if (!err) |
| 1054 | err = ufshcd_copy_query_response(hba); |
| 1055 | break; |
| 1056 | case UPIU_TRANSACTION_REJECT_UPIU: |
| 1057 | /* TODO: handle Reject UPIU Response */ |
| 1058 | err = -EPERM; |
| 1059 | dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", |
| 1060 | __func__); |
| 1061 | break; |
| 1062 | default: |
| 1063 | err = -EINVAL; |
| 1064 | dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", |
| 1065 | __func__, resp); |
| 1066 | } |
| 1067 | |
| 1068 | return err; |
| 1069 | } |
| 1070 | |
| 1071 | /** |
| 1072 | * ufshcd_init_query() - init the query response and request parameters |
| 1073 | */ |
| 1074 | static inline void ufshcd_init_query(struct ufs_hba *hba, |
| 1075 | struct ufs_query_req **request, |
| 1076 | struct ufs_query_res **response, |
| 1077 | enum query_opcode opcode, |
| 1078 | u8 idn, u8 index, u8 selector) |
| 1079 | { |
| 1080 | *request = &hba->dev_cmd.query.request; |
| 1081 | *response = &hba->dev_cmd.query.response; |
| 1082 | memset(*request, 0, sizeof(struct ufs_query_req)); |
| 1083 | memset(*response, 0, sizeof(struct ufs_query_res)); |
| 1084 | (*request)->upiu_req.opcode = opcode; |
| 1085 | (*request)->upiu_req.idn = idn; |
| 1086 | (*request)->upiu_req.index = index; |
| 1087 | (*request)->upiu_req.selector = selector; |
| 1088 | } |
| 1089 | |
| 1090 | /** |
| 1091 | * ufshcd_query_flag() - API function for sending flag query requests |
| 1092 | */ |
| 1093 | int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, |
| 1094 | enum flag_idn idn, bool *flag_res) |
| 1095 | { |
| 1096 | struct ufs_query_req *request = NULL; |
| 1097 | struct ufs_query_res *response = NULL; |
| 1098 | int err, index = 0, selector = 0; |
| 1099 | int timeout = QUERY_REQ_TIMEOUT; |
| 1100 | |
| 1101 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
| 1102 | selector); |
| 1103 | |
| 1104 | switch (opcode) { |
| 1105 | case UPIU_QUERY_OPCODE_SET_FLAG: |
| 1106 | case UPIU_QUERY_OPCODE_CLEAR_FLAG: |
| 1107 | case UPIU_QUERY_OPCODE_TOGGLE_FLAG: |
| 1108 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; |
| 1109 | break; |
| 1110 | case UPIU_QUERY_OPCODE_READ_FLAG: |
| 1111 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; |
| 1112 | if (!flag_res) { |
| 1113 | /* No dummy reads */ |
| 1114 | dev_err(hba->dev, "%s: Invalid argument for read request\n", |
| 1115 | __func__); |
| 1116 | err = -EINVAL; |
| 1117 | goto out; |
| 1118 | } |
| 1119 | break; |
| 1120 | default: |
| 1121 | dev_err(hba->dev, |
| 1122 | "%s: Expected query flag opcode but got = %d\n", |
| 1123 | __func__, opcode); |
| 1124 | err = -EINVAL; |
| 1125 | goto out; |
| 1126 | } |
| 1127 | |
| 1128 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); |
| 1129 | |
| 1130 | if (err) { |
| 1131 | dev_err(hba->dev, |
| 1132 | "%s: Sending flag query for idn %d failed, err = %d\n", |
| 1133 | __func__, idn, err); |
| 1134 | goto out; |
| 1135 | } |
| 1136 | |
| 1137 | if (flag_res) |
| 1138 | *flag_res = (be32_to_cpu(response->upiu_res.value) & |
| 1139 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; |
| 1140 | |
| 1141 | out: |
| 1142 | return err; |
| 1143 | } |
| 1144 | |
| 1145 | static int ufshcd_query_flag_retry(struct ufs_hba *hba, |
| 1146 | enum query_opcode opcode, |
| 1147 | enum flag_idn idn, bool *flag_res) |
| 1148 | { |
| 1149 | int ret; |
| 1150 | int retries; |
| 1151 | |
| 1152 | for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { |
| 1153 | ret = ufshcd_query_flag(hba, opcode, idn, flag_res); |
| 1154 | if (ret) |
| 1155 | dev_dbg(hba->dev, |
| 1156 | "%s: failed with error %d, retries %d\n", |
| 1157 | __func__, ret, retries); |
| 1158 | else |
| 1159 | break; |
| 1160 | } |
| 1161 | |
| 1162 | if (ret) |
| 1163 | dev_err(hba->dev, |
| 1164 | "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", |
| 1165 | __func__, opcode, idn, ret, retries); |
| 1166 | return ret; |
| 1167 | } |
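/*
 * Illustrative sketch (not part of the original code): the typical use of
 * these helpers during device initialization is to set and then poll the
 * fDeviceInit flag, assuming the QUERY_FLAG_IDN_FDEVICEINIT IDN from ufs.h:
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	if (!err)
 *		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					      QUERY_FLAG_IDN_FDEVICEINIT,
 *					      &flag_res);
 *
 * A flag_res that is still true means the device has not yet cleared
 * fDeviceInit.
 */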
| 1168 | |
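/**
 * __ufshcd_query_descriptor - issue a single descriptor read/write query
 *
 * Validates the buffer, sends a READ_DESC/WRITE_DESC query and, on success,
 * updates *buf_len with the descriptor length actually returned by the
 * device.
 */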
| 1169 | static int __ufshcd_query_descriptor(struct ufs_hba *hba, |
| 1170 | enum query_opcode opcode, |
| 1171 | enum desc_idn idn, u8 index, u8 selector, |
| 1172 | u8 *desc_buf, int *buf_len) |
| 1173 | { |
| 1174 | struct ufs_query_req *request = NULL; |
| 1175 | struct ufs_query_res *response = NULL; |
| 1176 | int err; |
| 1177 | |
| 1178 | if (!desc_buf) { |
| 1179 | dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", |
| 1180 | __func__, opcode); |
| 1181 | err = -EINVAL; |
| 1182 | goto out; |
| 1183 | } |
| 1184 | |
| 1185 | if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { |
| 1186 | dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", |
| 1187 | __func__, *buf_len); |
| 1188 | err = -EINVAL; |
| 1189 | goto out; |
| 1190 | } |
| 1191 | |
| 1192 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
| 1193 | selector); |
| 1194 | hba->dev_cmd.query.descriptor = desc_buf; |
| 1195 | request->upiu_req.length = cpu_to_be16(*buf_len); |
| 1196 | |
| 1197 | switch (opcode) { |
| 1198 | case UPIU_QUERY_OPCODE_WRITE_DESC: |
| 1199 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; |
| 1200 | break; |
| 1201 | case UPIU_QUERY_OPCODE_READ_DESC: |
| 1202 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; |
| 1203 | break; |
| 1204 | default: |
| 1205 | dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n", |
| 1206 | __func__, opcode); |
| 1207 | err = -EINVAL; |
| 1208 | goto out; |
| 1209 | } |
| 1210 | |
| 1211 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
| 1212 | |
| 1213 | if (err) { |
| 1214 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", |
| 1215 | __func__, opcode, idn, index, err); |
| 1216 | goto out; |
| 1217 | } |
| 1218 | |
| 1219 | hba->dev_cmd.query.descriptor = NULL; |
| 1220 | *buf_len = be16_to_cpu(response->upiu_res.length); |
| 1221 | |
| 1222 | out: |
| 1223 | return err; |
| 1224 | } |
| 1225 | |
| 1226 | /** |
| 1227 | * ufshcd_query_descriptor_retry - API function for sending descriptor requests |
| 1228 | */ |
| 1229 | int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, |
| 1230 | enum desc_idn idn, u8 index, u8 selector, |
| 1231 | u8 *desc_buf, int *buf_len) |
| 1232 | { |
| 1233 | int err; |
| 1234 | int retries; |
| 1235 | |
| 1236 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { |
| 1237 | err = __ufshcd_query_descriptor(hba, opcode, idn, index, |
| 1238 | selector, desc_buf, buf_len); |
| 1239 | if (!err || err == -EINVAL) |
| 1240 | break; |
| 1241 | } |
| 1242 | |
| 1243 | return err; |
| 1244 | } |
| 1245 | |
| 1246 | /** |
| 1247 | * ufshcd_read_desc_length - read the specified descriptor length from header |
| 1248 | */ |
| 1249 | static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id, |
| 1250 | int desc_index, int *desc_length) |
| 1251 | { |
| 1252 | int ret; |
| 1253 | u8 header[QUERY_DESC_HDR_SIZE]; |
| 1254 | int header_len = QUERY_DESC_HDR_SIZE; |
| 1255 | |
| 1256 | if (desc_id >= QUERY_DESC_IDN_MAX) |
| 1257 | return -EINVAL; |
| 1258 | |
| 1259 | ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, |
| 1260 | desc_id, desc_index, 0, header, |
| 1261 | &header_len); |
| 1262 | |
| 1263 | if (ret) { |
| 1264 | dev_err(hba->dev, "%s: Failed to get descriptor header id %d", |
| 1265 | __func__, desc_id); |
| 1266 | return ret; |
| 1267 | } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { |
| 1268 | dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", |
| 1269 | __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], |
| 1270 | desc_id); |
| 1271 | ret = -EINVAL; |
| 1272 | } |
| 1273 | |
| 1274 | *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; |
| 1275 | |
| 1276 | return ret; |
| 1277 | } |
| 1278 | |
| 1279 | static void ufshcd_init_desc_sizes(struct ufs_hba *hba) |
| 1280 | { |
| 1281 | int err; |
| 1282 | |
| 1283 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, |
| 1284 | &hba->desc_size.dev_desc); |
| 1285 | if (err) |
| 1286 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; |
| 1287 | |
| 1288 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, |
| 1289 | &hba->desc_size.pwr_desc); |
| 1290 | if (err) |
| 1291 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; |
| 1292 | |
| 1293 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, |
| 1294 | &hba->desc_size.interc_desc); |
| 1295 | if (err) |
| 1296 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; |
| 1297 | |
| 1298 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, |
| 1299 | &hba->desc_size.conf_desc); |
| 1300 | if (err) |
| 1301 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; |
| 1302 | |
| 1303 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, |
| 1304 | &hba->desc_size.unit_desc); |
| 1305 | if (err) |
| 1306 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; |
| 1307 | |
| 1308 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, |
| 1309 | &hba->desc_size.geom_desc); |
| 1310 | if (err) |
| 1311 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; |
| 1312 | |
| 1313 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, |
| 1314 | &hba->desc_size.hlth_desc); |
| 1315 | if (err) |
| 1316 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; |
| 1317 | } |
| 1318 | |
| 1319 | /** |
| 1320 | * ufshcd_map_desc_id_to_length - map descriptor IDN to its length |
| 1321 | * |
| 1322 | */ |
| 1323 | int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, |
| 1324 | int *desc_len) |
| 1325 | { |
| 1326 | switch (desc_id) { |
| 1327 | case QUERY_DESC_IDN_DEVICE: |
| 1328 | *desc_len = hba->desc_size.dev_desc; |
| 1329 | break; |
| 1330 | case QUERY_DESC_IDN_POWER: |
| 1331 | *desc_len = hba->desc_size.pwr_desc; |
| 1332 | break; |
| 1333 | case QUERY_DESC_IDN_GEOMETRY: |
| 1334 | *desc_len = hba->desc_size.geom_desc; |
| 1335 | break; |
| 1336 | case QUERY_DESC_IDN_CONFIGURATION: |
| 1337 | *desc_len = hba->desc_size.conf_desc; |
| 1338 | break; |
| 1339 | case QUERY_DESC_IDN_UNIT: |
| 1340 | *desc_len = hba->desc_size.unit_desc; |
| 1341 | break; |
| 1342 | case QUERY_DESC_IDN_INTERCONNECT: |
| 1343 | *desc_len = hba->desc_size.interc_desc; |
| 1344 | break; |
| 1345 | case QUERY_DESC_IDN_STRING: |
| 1346 | *desc_len = QUERY_DESC_MAX_SIZE; |
| 1347 | break; |
| 1348 | case QUERY_DESC_IDN_HEALTH: |
| 1349 | *desc_len = hba->desc_size.hlth_desc; |
| 1350 | break; |
| 1351 | case QUERY_DESC_IDN_RFU_0: |
| 1352 | case QUERY_DESC_IDN_RFU_1: |
| 1353 | *desc_len = 0; |
| 1354 | break; |
| 1355 | default: |
| 1356 | *desc_len = 0; |
| 1357 | return -EINVAL; |
| 1358 | } |
| 1359 | return 0; |
| 1360 | } |
| 1361 | EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); |
| 1362 | |
| 1363 | /** |
| 1364 | * ufshcd_read_desc_param - read the specified descriptor parameter |
| 1365 | * |
| 1366 | */ |
| 1367 | int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, |
| 1368 | int desc_index, u8 param_offset, u8 *param_read_buf, |
| 1369 | u8 param_size) |
| 1370 | { |
| 1371 | int ret; |
| 1372 | u8 *desc_buf; |
| 1373 | int buff_len; |
| 1374 | bool is_kmalloc = true; |
| 1375 | |
| 1376 | /* Safety check */ |
| 1377 | if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) |
| 1378 | return -EINVAL; |
| 1379 | |
| 1380 | /* Get the max length of descriptor from structure filled up at probe |
| 1381 | * time. |
| 1382 | */ |
| 1383 | ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); |
| 1384 | |
| 1385 | /* Sanity checks */ |
| 1386 | if (ret || !buff_len) { |
| 1387 | dev_err(hba->dev, "%s: Failed to get full descriptor length", |
| 1388 | __func__); |
| 1389 | return ret; |
| 1390 | } |
| 1391 | |
| 1392 | /* Check whether we need temp memory */ |
| 1393 | if (param_offset != 0 || param_size < buff_len) { |
| 1394 | desc_buf = kmalloc(buff_len, GFP_KERNEL); |
| 1395 | if (!desc_buf) |
| 1396 | return -ENOMEM; |
| 1397 | } else { |
| 1398 | desc_buf = param_read_buf; |
| 1399 | is_kmalloc = false; |
| 1400 | } |
| 1401 | |
| 1402 | /* Request for full descriptor */ |
| 1403 | ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, |
| 1404 | desc_id, desc_index, 0, desc_buf, |
| 1405 | &buff_len); |
| 1406 | |
| 1407 | if (ret) { |
| 1408 | dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d", |
| 1409 | __func__, desc_id, desc_index, param_offset, ret); |
| 1410 | goto out; |
| 1411 | } |
| 1412 | |
| 1413 | /* Sanity check */ |
| 1414 | if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { |
| 1415 | dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header", |
| 1416 | __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); |
| 1417 | ret = -EINVAL; |
| 1418 | goto out; |
| 1419 | } |
| 1420 | |
| 1421 | /* Check that we will not copy more data than is available */ |
| 1422 | if (is_kmalloc && param_size > buff_len) |
| 1423 | param_size = buff_len; |
| 1424 | |
| 1425 | if (is_kmalloc) |
| 1426 | memcpy(param_read_buf, &desc_buf[param_offset], param_size); |
| 1427 | out: |
| 1428 | if (is_kmalloc) |
| 1429 | kfree(desc_buf); |
| 1430 | return ret; |
| 1431 | } |
| 1432 | |
| 1433 | /* replace non-printable or non-ASCII characters with spaces */ |
| 1434 | static inline void ufshcd_remove_non_printable(uint8_t *val) |
| 1435 | { |
| 1436 | if (!val) |
| 1437 | return; |
| 1438 | |
| 1439 | if (*val < 0x20 || *val > 0x7e) |
| 1440 | *val = ' '; |
| 1441 | } |
| 1442 | |
| 1443 | /** |
| 1444 | * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power |
| 1445 | * state and wait for it to take effect. |
| 1446 | * |
| 1447 | */ |
| 1448 | static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) |
| 1449 | { |
| 1450 | unsigned long start = 0; |
| 1451 | u8 status; |
| 1452 | int ret; |
| 1453 | |
| 1454 | ret = ufshcd_send_uic_cmd(hba, cmd); |
| 1455 | if (ret) { |
| 1456 | dev_err(hba->dev, |
| 1457 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", |
| 1458 | cmd->command, cmd->argument3, ret); |
| 1459 | |
| 1460 | return ret; |
| 1461 | } |
| 1462 | |
| 1463 | start = get_timer(0); |
| 1464 | do { |
| 1465 | status = ufshcd_get_upmcrs(hba); |
| 1466 | if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { |
| 1467 | dev_err(hba->dev, |
| 1468 | "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", |
| 1469 | cmd->command, status); |
| 1470 | ret = (status != PWR_OK) ? status : -1; |
| 1471 | break; |
| 1472 | } |
| 1473 | } while (status != PWR_LOCAL); |
| 1474 | |
| 1475 | return ret; |
| 1476 | } |
| 1477 | |
| 1478 | /** |
| 1479 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change |
| 1480 | * using DME_SET primitives. |
| 1481 | */ |
| 1482 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) |
| 1483 | { |
| 1484 | struct uic_command uic_cmd = {0}; |
| 1485 | int ret; |
| 1486 | |
| 1487 | uic_cmd.command = UIC_CMD_DME_SET; |
| 1488 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); |
| 1489 | uic_cmd.argument3 = mode; |
| 1490 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); |
| 1491 | |
| 1492 | return ret; |
| 1493 | } |
| 1494 | |
| 1495 | static |
| 1496 | void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba, |
| 1497 | struct scsi_cmd *pccb, u32 upiu_flags) |
| 1498 | { |
| 1499 | struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; |
| 1500 | unsigned int cdb_len; |
| 1501 | |
| 1502 | /* command descriptor fields */ |
| 1503 | ucd_req_ptr->header.dword_0 = |
| 1504 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags, |
| 1505 | pccb->lun, TASK_TAG); |
| 1506 | ucd_req_ptr->header.dword_1 = |
| 1507 | UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); |
| 1508 | |
| 1509 | /* Total EHS length and Data segment length will be zero */ |
| 1510 | ucd_req_ptr->header.dword_2 = 0; |
| 1511 | |
| 1512 | ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen); |
| 1513 | |
| 1514 | cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE); |
| 1515 | memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); |
| 1516 | memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len); |
| 1517 | |
| 1518 | memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 1519 | ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr)); |
| 1520 | ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1521 | } |
| 1522 | |
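/*
 * Fill one PRDT entry. Callers pass the byte count minus one in @len; the
 * low two bits are then forced to 1 (GENMASK(1, 0)) since the UFSHCI data
 * byte count field is dword-granular.
 */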
| 1523 | static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry, |
| 1524 | unsigned char *buf, ulong len) |
| 1525 | { |
| 1526 | entry->size = cpu_to_le32(len) | GENMASK(1, 0); |
| 1527 | entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf)); |
| 1528 | entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf)); |
| 1529 | } |
| 1530 | |
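/*
 * Build the PRDT for @pccb: flush the data buffer for writes, invalidate it
 * so reads will not hit stale cache lines, split the transfer into entries
 * of at most MAX_PRDT_ENTRY bytes each, and record the entry count in the
 * transfer request descriptor.
 */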
| 1531 | static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb) |
| 1532 | { |
| 1533 | struct utp_transfer_req_desc *req_desc = hba->utrdl; |
| 1534 | struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr; |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 1535 | uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1536 | ulong datalen = pccb->datalen; |
| 1537 | int table_length; |
| 1538 | u8 *buf; |
| 1539 | int i; |
| 1540 | |
| 1541 | if (!datalen) { |
| 1542 | req_desc->prd_table_length = 0; |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 1543 | ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1544 | return; |
| 1545 | } |
| 1546 | |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 1547 | if (pccb->dma_dir == DMA_TO_DEVICE) { /* Write to device */ |
| 1548 | flush_dcache_range(aaddr, aaddr + |
| 1549 | ALIGN(datalen, ARCH_DMA_MINALIGN)); |
| 1550 | } |
| 1551 | |
| 1552 | /* In any case, invalidate the cache to avoid reading back stale data. */
| 1553 | invalidate_dcache_range(aaddr, aaddr + |
| 1554 | ALIGN(datalen, ARCH_DMA_MINALIGN)); |
| 1555 | |
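| | /*
| | * Build the PRDT. Worked example (illustration only): with
| | * MAX_PRDT_ENTRY = 256 KiB, a 600 KiB transfer gives table_length = 3;
| | * the loop below fills two full 256 KiB entries and the final
| | * prepare_prdt_desc() call covers the remaining 88 KiB, each entry
| | * programmed with its length minus one.
| | */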
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1556 | table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY); |
| 1557 | buf = pccb->pdata; |
| 1558 | i = table_length; |
| 1559 | while (--i) { |
| 1560 | prepare_prdt_desc(&prd_table[table_length - i - 1], buf, |
| 1561 | MAX_PRDT_ENTRY - 1); |
| 1562 | buf += MAX_PRDT_ENTRY; |
| 1563 | datalen -= MAX_PRDT_ENTRY; |
| 1564 | } |
| 1565 | |
| 1566 | prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1); |
| 1567 | |
| 1568 | req_desc->prd_table_length = table_length; |
Marek Vasut | c5b3e5c | 2023-08-16 17:05:55 +0200 | [diff] [blame] | 1569 | ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length); |
| 1570 | ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc)); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1571 | } |
| 1572 | |
| 1573 | static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb) |
| 1574 | { |
| 1575 | struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1576 | u32 upiu_flags; |
| 1577 | int ocs, result = 0; |
| 1578 | u8 scsi_status; |
| 1579 | |
Marek Vasut | 7f26fcb | 2023-08-16 17:05:53 +0200 | [diff] [blame] | 1580 | ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1581 | ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags); |
| 1582 | prepare_prdt_table(hba, pccb); |
| 1583 | |
| 1584 | ufshcd_send_command(hba, TASK_TAG); |
| 1585 | |
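| | /*
| | * The Overall Command Status (OCS) reports whether the transfer request
| | * completed; on success the response UPIU carries the SCSI status byte.
| | */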
| 1586 | ocs = ufshcd_get_tr_ocs(hba); |
| 1587 | switch (ocs) { |
| 1588 | case OCS_SUCCESS: |
| 1589 | result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); |
| 1590 | switch (result) { |
| 1591 | case UPIU_TRANSACTION_RESPONSE: |
| 1592 | result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr); |
| 1593 | |
| 1594 | scsi_status = result & MASK_SCSI_STATUS; |
| 1595 | if (scsi_status) |
| 1596 | return -EINVAL; |
| 1597 | |
| 1598 | break; |
| 1599 | case UPIU_TRANSACTION_REJECT_UPIU: |
| 1600 | /* TODO: handle Reject UPIU Response */ |
| 1601 | dev_err(hba->dev, |
| 1602 | "Reject UPIU not fully implemented\n"); |
| 1603 | return -EINVAL; |
| 1604 | default: |
| 1605 | dev_err(hba->dev, |
| 1606 | "Unexpected request response code = %x\n", |
| 1607 | result); |
| 1608 | return -EINVAL; |
| 1609 | } |
| 1610 | break; |
| 1611 | default: |
| 1612 | dev_err(hba->dev, "OCS error from controller = %x\n", ocs); |
| 1613 | return -EINVAL; |
| 1614 | } |
| 1615 | |
| 1616 | return 0; |
| 1617 | } |
| 1618 | |
| 1619 | static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, |
| 1620 | int desc_index, u8 *buf, u32 size) |
| 1621 | { |
| 1622 | return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); |
| 1623 | } |
| 1624 | |
| 1625 | static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) |
| 1626 | { |
| 1627 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); |
| 1628 | } |
| 1629 | |
| 1630 | /**
| 1631 | * ufshcd_read_string_desc - read string descriptor
| 1632 | * @hba: pointer to adapter instance
| | * @desc_index: index of the string descriptor to read
| | * @buf: buffer where the descriptor data is returned
| | * @size: size of @buf in bytes
| | * @ascii: if true, convert the UTF-16 string to ASCII in place
| | *
| | * Return: 0 on success, a negative error code otherwise.
| 1633 | */
| 1634 | int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, |
| 1635 | u8 *buf, u32 size, bool ascii) |
| 1636 | { |
| 1637 | int err = 0; |
| 1638 | |
| 1639 | err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf, |
| 1640 | size); |
| 1641 | |
| 1642 | if (err) { |
| 1643 | dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", |
| 1644 | __func__, QUERY_REQ_RETRIES, err); |
| 1645 | goto out; |
| 1646 | } |
| 1647 | |
| 1648 | if (ascii) { |
| 1649 | int desc_len; |
| 1650 | int ascii_len; |
| 1651 | int i; |
| 1652 | u8 *buff_ascii; |
| 1653 | |
| 1654 | desc_len = buf[0]; |
| 1655 | /* drop the 2-byte header, halve the UTF-16 length, add one byte for the NUL terminator */
| 1656 | ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; |
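| | /*
| | * For example, a 34-byte string descriptor (2-byte header plus
| | * 16 UTF-16 code units) yields ascii_len = 17: 16 characters plus
| | * the NUL terminator.
| | */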
| 1657 | if (size < ascii_len + QUERY_DESC_HDR_SIZE) { |
| 1658 | dev_err(hba->dev, "%s: buffer allocated size is too small\n", |
| 1659 | __func__); |
| 1660 | err = -ENOMEM; |
| 1661 | goto out; |
| 1662 | } |
| 1663 | |
| 1664 | buff_ascii = kmalloc(ascii_len, GFP_KERNEL); |
| 1665 | if (!buff_ascii) { |
| 1666 | err = -ENOMEM; |
| 1667 | goto out; |
| 1668 | } |
| 1669 | |
| 1670 | /*
| 1671 | * The descriptor contains the string in UTF-16 format;
| 1672 | * convert it to UTF-8 so it can be displayed.
| 1673 | */
| 1674 | utf16_to_utf8(buff_ascii, |
| 1675 | (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len); |
| 1676 | |
| 1677 | /* replace non-printable or non-ASCII characters with spaces */ |
| 1678 | for (i = 0; i < ascii_len; i++) |
| 1679 | ufshcd_remove_non_printable(&buff_ascii[i]); |
| 1680 | |
| 1681 | memset(buf + QUERY_DESC_HDR_SIZE, 0, |
| 1682 | size - QUERY_DESC_HDR_SIZE); |
| 1683 | memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); |
| 1684 | buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; |
| 1685 | kfree(buff_ascii); |
| 1686 | } |
| 1687 | out: |
| 1688 | return err; |
| 1689 | } |
| 1690 | |
| 1691 | static int ufs_get_device_desc(struct ufs_hba *hba, |
| 1692 | struct ufs_dev_desc *dev_desc) |
| 1693 | { |
| 1694 | int err; |
| 1695 | size_t buff_len; |
| 1696 | u8 model_index; |
| 1697 | u8 *desc_buf; |
| 1698 | |
| 1699 | buff_len = max_t(size_t, hba->desc_size.dev_desc, |
| 1700 | QUERY_DESC_MAX_SIZE + 1); |
| 1701 | desc_buf = kmalloc(buff_len, GFP_KERNEL); |
| 1702 | if (!desc_buf) { |
| 1703 | err = -ENOMEM; |
| 1704 | goto out; |
| 1705 | } |
| 1706 | |
| 1707 | err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); |
| 1708 | if (err) { |
| 1709 | dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", |
| 1710 | __func__, err); |
| 1711 | goto out; |
| 1712 | } |
| 1713 | |
| 1714 | /*
| 1715 | * The vendor identification (manufacturerID) and bank index are stored
| 1716 | * big-endian in the device descriptor, so assemble them byte by byte.
| 1717 | */
| 1718 | dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | |
| 1719 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; |
| 1720 | |
| 1721 | model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; |
| 1722 | |
| 1723 | /* Zero-pad entire buffer for string termination. */ |
| 1724 | memset(desc_buf, 0, buff_len); |
| 1725 | |
| 1726 | err = ufshcd_read_string_desc(hba, model_index, desc_buf, |
| 1727 | QUERY_DESC_MAX_SIZE, true/*ASCII*/); |
| 1728 | if (err) { |
| 1729 | dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", |
| 1730 | __func__, err); |
| 1731 | goto out; |
| 1732 | } |
| 1733 | |
| 1734 | desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; |
| 1735 | strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE), |
| 1736 | min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], |
| 1737 | MAX_MODEL_LEN)); |
| 1738 | |
| 1739 | /* Null terminate the model string */ |
| 1740 | dev_desc->model[MAX_MODEL_LEN] = '\0'; |
| 1741 | |
| 1742 | out: |
| 1743 | kfree(desc_buf); |
| 1744 | return err; |
| 1745 | } |
| 1746 | |
| 1747 | /** |
| 1748 | * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device |
| 1749 | */ |
| 1750 | static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) |
| 1751 | { |
| 1752 | struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; |
| 1753 | |
| 1754 | if (hba->max_pwr_info.is_valid) |
| 1755 | return 0; |
| 1756 | |
Marek Vasut | f430151 | 2023-08-16 17:05:51 +0200 | [diff] [blame] | 1757 | if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { |
| 1758 | pwr_info->pwr_tx = FASTAUTO_MODE; |
| 1759 | pwr_info->pwr_rx = FASTAUTO_MODE; |
| 1760 | } else { |
| 1761 | pwr_info->pwr_tx = FAST_MODE; |
| 1762 | pwr_info->pwr_rx = FAST_MODE; |
| 1763 | } |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1764 | pwr_info->hs_rate = PA_HS_MODE_B; |
| 1765 | |
| 1766 | /* Get the connected lane count */ |
| 1767 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), |
| 1768 | &pwr_info->lane_rx); |
| 1769 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), |
| 1770 | &pwr_info->lane_tx); |
| 1771 | |
| 1772 | if (!pwr_info->lane_rx || !pwr_info->lane_tx) { |
| 1773 | dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", |
| 1774 | __func__, pwr_info->lane_rx, pwr_info->lane_tx); |
| 1775 | return -EINVAL; |
| 1776 | } |
| 1777 | |
| 1778 | /*
| 1779 | * First read the maximum HS gear. A value of zero means there is no
| 1780 | * HS gear capability, so fall back to the maximum PWM gear and use
| 1781 | * SLOW_MODE for that direction.
| 1782 | */
| 1783 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); |
| 1784 | if (!pwr_info->gear_rx) { |
| 1785 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), |
| 1786 | &pwr_info->gear_rx); |
| 1787 | if (!pwr_info->gear_rx) { |
| 1788 | dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", |
| 1789 | __func__, pwr_info->gear_rx); |
| 1790 | return -EINVAL; |
| 1791 | } |
| 1792 | pwr_info->pwr_rx = SLOW_MODE; |
| 1793 | } |
| 1794 | |
| 1795 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), |
| 1796 | &pwr_info->gear_tx); |
| 1797 | if (!pwr_info->gear_tx) { |
| 1798 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), |
| 1799 | &pwr_info->gear_tx); |
| 1800 | if (!pwr_info->gear_tx) { |
| 1801 | dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", |
| 1802 | __func__, pwr_info->gear_tx); |
| 1803 | return -EINVAL; |
| 1804 | } |
| 1805 | pwr_info->pwr_tx = SLOW_MODE; |
| 1806 | } |
| 1807 | |
| 1808 | hba->max_pwr_info.is_valid = true; |
| 1809 | return 0; |
| 1810 | } |
| 1811 | |
| 1812 | static int ufshcd_change_power_mode(struct ufs_hba *hba, |
| 1813 | struct ufs_pa_layer_attr *pwr_mode) |
| 1814 | { |
| 1815 | int ret; |
| 1816 | |
| 1817 | /* if already configured to the requested pwr_mode */ |
| 1818 | if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && |
| 1819 | pwr_mode->gear_tx == hba->pwr_info.gear_tx && |
| 1820 | pwr_mode->lane_rx == hba->pwr_info.lane_rx && |
| 1821 | pwr_mode->lane_tx == hba->pwr_info.lane_tx && |
| 1822 | pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && |
| 1823 | pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && |
| 1824 | pwr_mode->hs_rate == hba->pwr_info.hs_rate) { |
| 1825 | dev_dbg(hba->dev, "%s: power already configured\n", __func__); |
| 1826 | return 0; |
| 1827 | } |
| 1828 | |
| 1829 | /* |
| 1830 | * Configure attributes for power mode change with below. |
| 1831 | * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, |
| 1832 | * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, |
| 1833 | * - PA_HSSERIES |
| 1834 | */ |
| 1835 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); |
| 1836 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), |
| 1837 | pwr_mode->lane_rx); |
| 1838 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE) |
| 1839 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); |
| 1840 | else |
| 1841 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); |
| 1842 | |
| 1843 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); |
| 1844 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), |
| 1845 | pwr_mode->lane_tx); |
| 1846 | if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE) |
| 1847 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); |
| 1848 | else |
| 1849 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); |
| 1850 | |
| 1851 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || |
| 1852 | pwr_mode->pwr_tx == FASTAUTO_MODE || |
| 1853 | pwr_mode->pwr_rx == FAST_MODE || |
| 1854 | pwr_mode->pwr_tx == FAST_MODE) |
| 1855 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), |
| 1856 | pwr_mode->hs_rate); |
| 1857 | |
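| | /*
| | * PA_PWRMODE packs the RX mode into the upper nibble and the TX mode
| | * into the lower nibble, e.g. FAST_MODE in both directions gives 0x11.
| | */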
| 1858 | ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | |
| 1859 | pwr_mode->pwr_tx); |
| 1860 | |
| 1861 | if (ret) { |
| 1862 | dev_err(hba->dev, |
| 1863 | "%s: power mode change failed %d\n", __func__, ret); |
| 1864 | |
| 1865 | return ret; |
| 1866 | } |
| 1867 | |
| 1868 | /* Copy new Power Mode to power info */ |
| 1869 | memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); |
| 1870 | |
| 1871 | return ret; |
| 1872 | } |
| 1873 | |
| 1874 | /**
| 1875 | * ufshcd_verify_dev_init() - Verify device initialization
| 1876 | *
| | * Send a NOP OUT UPIU and wait for the NOP IN response to check whether
| | * the device's UTP layer is ready to accept commands.
| 1877 | */
| 1878 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) |
| 1879 | { |
| 1880 | int retries; |
| 1881 | int err; |
| 1882 | |
| 1883 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { |
| 1884 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, |
| 1885 | NOP_OUT_TIMEOUT); |
| 1886 | if (!err || err == -ETIMEDOUT) |
| 1887 | break; |
| 1888 | |
| 1889 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); |
| 1890 | } |
| 1891 | |
| 1892 | if (err) |
| 1893 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); |
| 1894 | |
| 1895 | return err; |
| 1896 | } |
| 1897 | |
| 1898 | /**
| 1899 | * ufshcd_complete_dev_init() - check device readiness
| | *
| | * Set the fDeviceInit flag and poll until the device clears it, which
| | * indicates that device initialization has completed.
| 1900 | */
| 1901 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) |
| 1902 | { |
| 1903 | int i; |
| 1904 | int err; |
| 1905 | bool flag_res = true;
| 1906 | |
| 1907 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
| 1908 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); |
| 1909 | if (err) { |
| 1910 | dev_err(hba->dev, |
| 1911 | "%s setting fDeviceInit flag failed with error %d\n", |
| 1912 | __func__, err); |
| 1913 | goto out; |
| 1914 | } |
| 1915 | |
| 1916 | /* poll for max. 1000 iterations for fDeviceInit flag to clear */ |
| 1917 | for (i = 0; i < 1000 && !err && flag_res; i++) |
| 1918 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, |
| 1919 | QUERY_FLAG_IDN_FDEVICEINIT, |
| 1920 | &flag_res); |
| 1921 | |
| 1922 | if (err) |
| 1923 | dev_err(hba->dev, |
| 1924 | "%s reading fDeviceInit flag failed with error %d\n", |
| 1925 | __func__, err); |
| 1926 | else if (flag_res) |
| 1927 | dev_err(hba->dev, |
| 1928 | "%s fDeviceInit was not cleared by the device\n", |
| 1929 | __func__); |
| 1930 | |
| 1931 | out: |
| 1932 | return err; |
| 1933 | } |
| 1934 | |
| 1935 | static void ufshcd_def_desc_sizes(struct ufs_hba *hba) |
| 1936 | { |
| 1937 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; |
| 1938 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; |
| 1939 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; |
| 1940 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; |
| 1941 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; |
| 1942 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; |
| 1943 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; |
| 1944 | } |
| 1945 | |
| 1946 | int ufs_start(struct ufs_hba *hba) |
| 1947 | { |
| 1948 | struct ufs_dev_desc card = {0}; |
| 1949 | int ret; |
| 1950 | |
| 1951 | ret = ufshcd_link_startup(hba); |
| 1952 | if (ret) |
| 1953 | return ret; |
| 1954 | |
| 1955 | ret = ufshcd_verify_dev_init(hba); |
| 1956 | if (ret) |
| 1957 | return ret; |
| 1958 | |
| 1959 | ret = ufshcd_complete_dev_init(hba); |
| 1960 | if (ret) |
| 1961 | return ret; |
| 1962 | |
| 1963 | /* Initialize device descriptor sizes */
| 1964 | ufshcd_init_desc_sizes(hba); |
| 1965 | |
| 1966 | ret = ufs_get_device_desc(hba, &card); |
| 1967 | if (ret) { |
| 1968 | dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", |
| 1969 | __func__, ret); |
| 1970 | |
| 1971 | return ret; |
| 1972 | } |
| 1973 | |
| 1974 | if (ufshcd_get_max_pwr_mode(hba)) { |
| 1975 | dev_err(hba->dev, |
| 1976 | "%s: Failed getting max supported power mode\n", |
| 1977 | __func__); |
| 1978 | } else { |
| 1979 | ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); |
| 1980 | if (ret) { |
| 1981 | dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", |
| 1982 | __func__, ret); |
| 1983 | |
| 1984 | return ret; |
| 1985 | } |
| 1986 | |
Bhupesh Sharma | 4089f83 | 2023-08-15 01:35:27 +0530 | [diff] [blame] | 1987 | debug("UFS Device %s is up!\n", hba->dev->name); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1988 | ufshcd_print_pwr_info(hba); |
| 1989 | } |
| 1990 | |
| 1991 | return 0; |
| 1992 | } |
| 1993 | |
| 1994 | int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) |
| 1995 | { |
| 1996 | struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); |
Simon Glass | 8a8d24b | 2020-12-03 16:55:23 -0700 | [diff] [blame] | 1997 | struct scsi_plat *scsi_plat; |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 1998 | struct udevice *scsi_dev; |
| 1999 | int err; |
| 2000 | |
| 2001 | device_find_first_child(ufs_dev, &scsi_dev); |
| 2002 | if (!scsi_dev) |
| 2003 | return -ENODEV; |
| 2004 | |
Simon Glass | caa4daa | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 2005 | scsi_plat = dev_get_uclass_plat(scsi_dev); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2006 | scsi_plat->max_id = UFSHCD_MAX_ID; |
| 2007 | scsi_plat->max_lun = UFS_MAX_LUNS; |
| 2008 | scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; |
| 2009 | |
| 2010 | hba->dev = ufs_dev; |
| 2011 | hba->ops = hba_ops; |
Johan Jonker | a12a73b | 2023-03-13 01:32:04 +0100 | [diff] [blame] | 2012 | hba->mmio_base = dev_read_addr_ptr(ufs_dev); |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2013 | |
| 2014 | /* Set descriptor lengths to specification defaults */ |
| 2015 | ufshcd_def_desc_sizes(hba); |
| 2016 | |
| 2017 | ufshcd_ops_init(hba); |
| 2018 | |
| 2019 | /* Read the controller capabilities register */
| 2020 | hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); |
Marek Vasut | 91913a1 | 2023-08-16 17:05:50 +0200 | [diff] [blame] | 2021 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) |
| 2022 | hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2023 | |
| 2024 | /* Get UFS version supported by the controller */ |
| 2025 | hba->version = ufshcd_get_ufs_version(hba); |
| 2026 | if (hba->version != UFSHCI_VERSION_10 && |
| 2027 | hba->version != UFSHCI_VERSION_11 && |
| 2028 | hba->version != UFSHCI_VERSION_20 && |
Marek Vasut | 2ff810a | 2023-08-16 17:05:52 +0200 | [diff] [blame] | 2029 | hba->version != UFSHCI_VERSION_21 && |
| 2030 | hba->version != UFSHCI_VERSION_30) |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2031 | dev_err(hba->dev, "invalid UFS version 0x%x\n", |
| 2032 | hba->version); |
| 2033 | |
| 2034 | /* Get Interrupt bit mask per version */ |
| 2035 | hba->intr_mask = ufshcd_get_intr_mask(hba); |
| 2036 | |
| 2037 | /* Allocate memory for host memory space */ |
| 2038 | err = ufshcd_memory_alloc(hba); |
| 2039 | if (err) { |
| 2040 | dev_err(hba->dev, "Memory allocation failed\n"); |
| 2041 | return err; |
| 2042 | } |
| 2043 | |
| 2044 | /* Configure Local data structures */ |
| 2045 | ufshcd_host_memory_configure(hba); |
| 2046 | |
| 2047 | /* |
| 2048 | * In order to avoid any spurious interrupt immediately after |
| 2049 | * registering UFS controller interrupt handler, clear any pending UFS |
| 2050 | * interrupt status and disable all the UFS interrupts. |
| 2051 | */ |
| 2052 | ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), |
| 2053 | REG_INTERRUPT_STATUS); |
| 2054 | ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); |
| 2055 | |
Bhupesh Sharma | 2eb2a1e | 2023-08-14 11:58:20 +0530 | [diff] [blame] | 2056 | mb(); |
| 2057 | |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2058 | err = ufshcd_hba_enable(hba); |
| 2059 | if (err) { |
| 2060 | dev_err(hba->dev, "Host controller enable failed\n"); |
| 2061 | return err; |
| 2062 | } |
| 2063 | |
| 2064 | err = ufs_start(hba); |
| 2065 | if (err) |
| 2066 | return err; |
| 2067 | |
| 2068 | return 0; |
| 2069 | } |
| 2070 | |
| 2071 | int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp) |
| 2072 | { |
| 2073 | int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi", |
| 2074 | scsi_devp); |
| 2075 | |
| 2076 | return ret; |
| 2077 | } |
| 2078 | |
Marek Vasut | 91913a1 | 2023-08-16 17:05:50 +0200 | [diff] [blame] | 2079 | #if IS_ENABLED(CONFIG_BOUNCE_BUFFER) |
| 2080 | static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state) |
| 2081 | { |
| 2082 | #ifdef CONFIG_PHYS_64BIT |
| 2083 | struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); |
| 2084 | uintptr_t ubuf = (uintptr_t)state->user_buffer; |
| 2085 | size_t len = state->len_aligned; |
| 2086 | |
| 2087 | /* With the broken 64-bit addressing quirk, the buffer must lie entirely below 4 GiB */
| 2088 | if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) && |
| 2089 | ((ubuf >> 32) || (ubuf + len) >> 32)) { |
| 2090 | dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n", |
| 2091 | ubuf, ubuf + len); |
| 2092 | return 0; |
| 2093 | } |
| 2094 | #endif |
| 2095 | return 1; |
| 2096 | } |
| 2097 | #endif /* CONFIG_BOUNCE_BUFFER */ |
| 2098 | |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2099 | static struct scsi_ops ufs_ops = { |
| 2100 | .exec = ufs_scsi_exec, |
Marek Vasut | 91913a1 | 2023-08-16 17:05:50 +0200 | [diff] [blame] | 2101 | #if IS_ENABLED(CONFIG_BOUNCE_BUFFER) |
| 2102 | .buffer_aligned = ufs_scsi_buffer_aligned, |
| 2103 | #endif /* CONFIG_BOUNCE_BUFFER */ |
Faiz Abbas | 7feafb0 | 2019-10-15 18:24:36 +0530 | [diff] [blame] | 2104 | }; |
| 2105 | |
| 2106 | int ufs_probe_dev(int index) |
| 2107 | { |
| 2108 | struct udevice *dev; |
| 2109 | |
| 2110 | return uclass_get_device(UCLASS_UFS, index, &dev); |
| 2111 | } |
| 2112 | |
| 2113 | int ufs_probe(void) |
| 2114 | { |
| 2115 | struct udevice *dev; |
| 2116 | int ret, i; |
| 2117 | |
| 2118 | for (i = 0;; i++) { |
| 2119 | ret = uclass_get_device(UCLASS_UFS, i, &dev); |
| 2120 | if (ret == -ENODEV) |
| 2121 | break; |
| 2122 | } |
| 2123 | |
| 2124 | return 0; |
| 2125 | } |
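| |
| | /*
| | * Typical usage sketch (not part of this driver): board or command code
| | * probes the controller and then rescans SCSI so the UFS LUNs appear as
| | * block devices, e.g.:
| | *
| | *	if (!ufs_probe_dev(0))
| | *		scsi_scan(false);
| | */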
| 2126 | |
| 2127 | U_BOOT_DRIVER(ufs_scsi) = { |
| 2128 | .id = UCLASS_SCSI, |
| 2129 | .name = "ufs_scsi", |
| 2130 | .ops = &ufs_ops, |
| 2131 | }; |