// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Bhupesh Sharma <bhupesh.sharma@linaro.org>
 *
 * Based on Linux driver
 */

#include <asm/io.h>
#include <clk.h>
#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <generic-phy.h>
#include <ufs.h>

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>

#include "ufs.h"
#include "ufs-qcom.h"

#define MSEC_PER_SEC	(1000L)
#define USEC_PER_SEC	(1000000L)
#define NSEC_PER_SEC	(1000000000L)

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
							u32 clk_cycles);

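/*
 * Look up clock @name for @dev. If @optional is set and the clock is not
 * described in the device tree (-ENOENT), report success and return a NULL
 * handle instead of an error.
 */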
static int ufs_qcom_clk_get(struct udevice *dev,
			    const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}

static int ufs_qcom_clk_enable(struct udevice *dev,
			       const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

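/*
 * Enable the rx/tx lane symbol clocks. These are sourced from the PHY PLL,
 * so they are only enabled once the PHY is up; on failure, already-enabled
 * clocks are disabled again in reverse order.
 */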
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_priv *priv)
{
	int err;
	struct udevice *dev = priv->hba->dev;

	if (priv->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_clk_enable(dev, "rx_lane0_sync_clk",
				  priv->rx_l0_sync_clk);
	if (err)
		return err;

	err = ufs_qcom_clk_enable(dev, "tx_lane0_sync_clk",
				  priv->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_clk_enable(dev, "rx_lane1_sync_clk",
				  priv->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	priv->is_lane_clks_enabled = true;

	return 0;

disable_tx_l0:
	clk_disable_unprepare(priv->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(priv->rx_l0_sync_clk);

	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_priv *priv)
{
	int err = 0;
	struct udevice *dev = priv->hba->dev;

	err = ufs_qcom_clk_get(dev, "rx_lane0_sync_clk",
			       &priv->rx_l0_sync_clk, false);
	if (err)
		return err;

	err = ufs_qcom_clk_get(dev, "tx_lane0_sync_clk",
			       &priv->tx_l0_sync_clk, false);
	if (err)
		return err;

	err = ufs_qcom_clk_get(dev, "rx_lane1_sync_clk",
			       &priv->rx_l1_sync_clk, false);
	if (err)
		return err;

	return 0;
}

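/*
 * Enable the main controller clocks (core, bus aggregator, interface and
 * UniPro core clocks); on failure, already-enabled clocks are disabled
 * again in reverse order.
 */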
static int ufs_qcom_enable_core_clks(struct ufs_qcom_priv *priv)
{
	int err;
	struct udevice *dev = priv->hba->dev;

	if (priv->is_core_clks_enabled)
		return 0;

	err = ufs_qcom_clk_enable(dev, "core_clk", priv->core_clk);
	if (err)
		return err;

	err = ufs_qcom_clk_enable(dev, "bus_aggr_clk", priv->bus_aggr_clk);
	if (err)
		goto disable_core_clk;

	err = ufs_qcom_clk_enable(dev, "iface_clk", priv->iface_clk);
	if (err)
		goto disable_bus_aggr_clk;

	err = ufs_qcom_clk_enable(dev, "core_clk_unipro", priv->core_clk_unipro);
	if (err)
		goto disable_iface_clk;

	priv->is_core_clks_enabled = true;

	return 0;

disable_iface_clk:
	clk_disable_unprepare(priv->iface_clk);
disable_bus_aggr_clk:
	clk_disable_unprepare(priv->bus_aggr_clk);
disable_core_clk:
	clk_disable_unprepare(priv->core_clk);

	return err;
}

static int ufs_qcom_init_core_clks(struct ufs_qcom_priv *priv)
{
	int err = 0;
	struct udevice *dev = priv->hba->dev;

	err = ufs_qcom_clk_get(dev, "core_clk",
			       &priv->core_clk, false);
	if (err)
		return err;

	err = ufs_qcom_clk_get(dev, "bus_aggr_clk",
			       &priv->bus_aggr_clk, false);
	if (err)
		return err;

	err = ufs_qcom_clk_get(dev, "iface_clk", &priv->iface_clk, false);
	if (err)
		return err;

	err = ufs_qcom_clk_get(dev, "core_clk_unipro", &priv->core_clk_unipro, false);
	if (err)
		return err;

	/* ref_clk is optional */

	return 0;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_priv *priv)
{
	ufshcd_rmwl(priv->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(priv) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);

	if (priv->hw_ver.major == 0x05)
		ufshcd_rmwl(priv->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);

	/* make sure above configuration is applied before we return */
	mb();
}

/*
 * ufs_qcom_reset - reset host controller and PHY
 */
static int ufs_qcom_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	int ret = 0;

	ret = reset_assert(&priv->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		return ret;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	udelay(210);

	ret = reset_deassert(&priv->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

	udelay(1100);

	return 0;
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);

	if (priv->hw_ver.major >= 0x2) {
		priv->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) beyond what is specified by the UFSHCI specification. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);

	if (priv->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			     | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			     | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (priv->hw_ver.minor == 0x0001 && priv->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (priv->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(priv))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				      | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (priv->hw_ver.major > 0x3)
		hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;

	if (ofnode_device_is_compatible(dev_ofnode(hba->dev), "qcom,sm8250-ufshc"))
		hba->quirks |= UFSHCD_QUIRK_SKIP_CHANGE_POWER_MODE;
}

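/*
 * Toggle the device reference clock enable bit in the controller register
 * space, with the short delays required around hibern8 entry/exit (see the
 * inline comments below).
 */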
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_hba *hba, bool enable)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);

	if (priv->dev_ref_clk_ctrl_mmio &&
	    (enable ^ priv->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(priv->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= priv->dev_ref_clk_en_mask;
		else
			temp &= ~priv->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active for specific time after
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(10);

		writel_relaxed(temp, priv->dev_ref_clk_ctrl_mmio);

		/*
		 * Make sure the write to ref_clk reaches the destination and
		 * is not stored in a Write Buffer (WB).
		 */
		readl(priv->dev_ref_clk_ctrl_mmio);

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		priv->is_dev_ref_clk_enabled = enable;
	}
}

/**
 * ufs_qcom_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	switch (status) {
	case PRE_CHANGE:
		if (!on) {
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(hba, false);
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode */
			ufs_qcom_dev_ref_clk_ctrl(hba, true);
		}
		break;
	}

	return 0;
}

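/*
 * Bring up the UFS PHY: reset the host controller and PHY, look up the
 * "ufsphy" handle, run the PHY init and power-on sequence, and finally
 * select the UniPro mode in the controller.
 */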
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	struct phy phy;
	int ret;

	/* Reset UFS Host Controller and PHY */
	ret = ufs_qcom_reset(hba);
	if (ret)
		dev_warn(hba->dev, "%s: host reset returned %d\n",
			 __func__, ret);

	/* get phy */
	ret = generic_phy_get_by_name(hba->dev, "ufsphy", &phy);
	if (ret) {
		dev_warn(hba->dev, "%s: Unable to get QMP ufs phy, ret = %d\n",
			 __func__, ret);
		return ret;
	}

	/* phy initialization */
	ret = generic_phy_init(&phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* power on phy */
	ret = generic_phy_power_on(&phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(priv);

	return 0;

out_disable_phy:
	generic_phy_exit(&phy);

	return ret;
}

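/*
 * Poll the M-PHY TX_FSM_STATE attribute until it reports HIBERN8, retrying
 * up to 50 times with a 200us delay (roughly 10ms in total) before giving up.
 */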
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err, retry_count = 50;
	u32 tx_fsm_val = 0;

	do {
		err = ufshcd_dme_get(hba,
				     UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
						     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				     &tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* max. 200us */
		udelay(200);
		retry_count--;
	} while (retry_count != 0);

	/*
	 * check the state again.
	 */
	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
			__func__, err);
	}

	return err;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		      ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		      REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_core_clks(priv);
		if (err < 0)
			return err;

		err = ufs_qcom_enable_lane_clks(priv);
		if (err < 0)
			return err;
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}

	return err;
}

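/*
 * Program the vendor-specific UTP timer registers from the core clock rate
 * and the requested gear/mode/rate. SYS1CLK_1US holds core clock cycles per
 * microsecond and CLK_NS_REG holds the core clock period in nanoseconds;
 * e.g. with a 100 MHz core clock this works out to 100000000 / 1000000 = 100
 * cycles per us and 1000000000 / 100000000 = 10 ns per cycle.
 */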
/*
 * Returns zero for success and non-zero in case of a failure
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for its
	 * interrupt aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(priv))
		return 0;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		return -EINVAL;
	}

	core_clk_rate = clk_get_rate(priv->core_clk);

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(priv))
		return 0;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear - 1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear - 1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			return -EINVAL;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			return -EINVAL;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear - 1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		return -EINVAL;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* the two fields of this register must be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer && priv->hw_ver.major != 0x5) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_CFG0);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}

	return 0;
}

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
							u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		return err;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	return ufshcd_dme_set(hba,
			      UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			      core_clk_ctrl_reg);
}

/* TBD: Move this to common framework layer */
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	switch (hba->version) {
	case UFSHCI_VERSION_10:
	case UFSHCI_VERSION_11:
		return UFS_UNIPRO_VER_1_41;

	case UFSHCI_VERSION_20:
	case UFSHCI_VERSION_21:
	default:
		return UFS_UNIPRO_VER_1_6;
	}
}

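/*
 * Before link startup (PRE_CHANGE): program the UTP timers for PWM G1
 * slow-auto mode, set the UniPro core clock cycle count to 150 when QUniPro
 * is supported and, for hosts newer than UniPro 1.41, clear
 * PA_Local_TX_LCC_Enable so that both host and device have TX LCC disabled
 * once link startup completes.
 */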
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			return -EINVAL;
		}

		if (ufs_qcom_cap_qunipro(priv))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and maybe the host) have issues if LCC is
		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup, which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);

		break;
	default:
		break;
	}

	return err;
}

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Powers up the PHY, enabling its clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);
	int err;

	priv->hba = hba;

	/* setup clocks */
	ufs_qcom_setup_clocks(hba, true, PRE_CHANGE);
	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	ufs_qcom_get_controller_revision(hba, &priv->hw_ver.major,
					 &priv->hw_ver.minor, &priv->hw_ver.step);
	dev_info(hba->dev, "Qcom UFS HC version: %d.%d.%d\n", priv->hw_ver.major,
		 priv->hw_ver.minor, priv->hw_ver.step);

	/*
	 * For newer controllers, the device reference clock control bit has
	 * moved inside the UFS controller register address space itself.
	 */
	if (priv->hw_ver.major >= 0x02) {
		priv->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		priv->dev_ref_clk_en_mask = BIT(26);
	}

	err = ufs_qcom_init_core_clks(priv);
	if (err) {
		dev_err(hba->dev, "failed to initialize core clocks, err:%d\n", err);
		return err;
	}

	err = ufs_qcom_init_lane_clks(priv);
	if (err) {
		dev_err(hba->dev, "failed to initialize lane clocks, err:%d\n", err);
		return err;
	}

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);
	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	/* Power up the PHY using UFS_HS_G3. */
	priv->hs_gear = UFS_HS_G3;

	return 0;
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);

	dev_info(hba->dev, "clk: %s, rate: %lu\n", "bus_aggr_clk",
		 clk_get_rate(priv->bus_aggr_clk));
	dev_info(hba->dev, "clk: %s, rate: %lu\n", "iface_clk",
		 clk_get_rate(priv->iface_clk));
	dev_info(hba->dev, "clk: %s, rate: %lu\n", "core_clk_unipro",
		 clk_get_rate(priv->core_clk_unipro));
}

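/*
 * Dump the vendor-specific HCI and UTP debug register regions.
 * UTP_DBG_RAMS_EN is set temporarily so that the internal EDTL, descriptor
 * and PRDT RAMs can be read, and is cleared again afterwards.
 */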
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	u32 reg;
	struct ufs_qcom_priv *priv = dev_get_priv(hba->dev);

	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_UFS_DBG_RD_REG_OCSC);
	ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_UFS_DBG_RD_EDTL_RAM);
	ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_UFS_DBG_RD_DESC_RAM);
	ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_UFS_DBG_RD_PRDT_RAM);
	ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_UAWM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_UARM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_TXUC);
	ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_RXUC);
	ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_DFC);
	ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_TRLUT);
	ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");

	reg = ufs_qcom_get_debug_reg_offset(priv, UFS_DBG_RD_REG_TMRLUT);
	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");

	ufshcd_print_clk_freqs(hba);
}

static struct ufs_hba_ops ufs_qcom_hba_ops = {
	.init = ufs_qcom_init,
	.dbg_register_dump = ufs_qcom_dump_dbg_regs,
	.hce_enable_notify = ufs_qcom_hce_enable_notify,
	.link_startup_notify = ufs_qcom_link_startup_notify,
};

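/*
 * Driver probe: acquire the "rst" core reset line and hand the controller
 * over to the generic UFS core via ufshcd_probe().
 */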
static int ufs_qcom_probe(struct udevice *dev)
{
	struct ufs_qcom_priv *priv = dev_get_priv(dev);
	int ret;

	/* get resets */
	ret = reset_get_by_name(dev, "rst", &priv->core_reset);
	if (ret) {
		dev_err(dev, "failed to get reset, ret:%d\n", ret);
		return ret;
	}

	ret = ufshcd_probe(dev, &ufs_qcom_hba_ops);
	if (ret) {
		dev_err(dev, "ufshcd_probe() failed, ret:%d\n", ret);
		return ret;
	}

	return 0;
}

static int ufs_qcom_bind(struct udevice *dev)
{
	struct udevice *scsi_dev;

	return ufs_scsi_bind(dev, &scsi_dev);
}

static const struct udevice_id ufs_qcom_ids[] = {
	{ .compatible = "qcom,ufshc" },
	{},
};

U_BOOT_DRIVER(qcom_ufshcd) = {
	.name = "qcom-ufshcd",
	.id = UCLASS_UFS,
	.of_match = ufs_qcom_ids,
	.probe = ufs_qcom_probe,
	.bind = ufs_qcom_bind,
	.priv_auto = sizeof(struct ufs_qcom_priv),
};