| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-08-06 15:44:25 +0300 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-08-06 15:44:25 +0300 |
| commit | d7edcc7c9109f165efcf5d767fed21578c37c46c (patch) | |
| tree | 199431e7049f717f229cef4a6bb13892cffeb9f5 /drivers/ufs | |
| parent | 479058002c32b77acac43e883b92174e22c4be2d (diff) | |
| parent | 7038db703317617ef3691fbbb7259d4cdf208cf2 (diff) | |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull more SCSI updates from James Bottomley:
"This is mostly fixes and cleanups and code reworks that trickled in
across the merge window and the weeks leading up. The only substantive
update is the Mediatek ufs driver which accounts for the bulk of the
additions"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (37 commits)
scsi: libsas: Use a bool for sas_deform_port() second argument
scsi: libsas: Move declarations of internal functions to sas_internal.h
scsi: libsas: Make sas_get_ata_info() static
scsi: libsas: Simplify sas_ata_wait_eh()
scsi: libsas: Refactor dev_is_sata()
scsi: sd: Make sd shutdown issue START STOP UNIT appropriately
scsi: arm64: dts: mediatek: mt8195: Add UFSHCI node
scsi: dt-bindings: mediatek,ufs: add MT8195 compatible and update clock nodes
scsi: dt-bindings: mediatek,ufs: Add ufs-disable-mcq flag for UFS host
scsi: ufs: ufs-mediatek: Add UFS host support for MT8195 SoC
scsi: ufs: ufs-pci: Remove control of UIC Completion interrupt for Intel MTL
scsi: ufs: core: Do not write interrupt enable register unnecessarily
scsi: ufs: core: Set and clear UIC Completion interrupt as needed
scsi: ufs: core: Remove duplicated code in ufshcd_send_bsg_uic_cmd()
scsi: ufs: core: Move ufshcd_enable_intr() and ufshcd_disable_intr()
scsi: ufs: ufs-pci: Remove UFS PCI driver's ->late_init() call back
scsi: ufs: ufs-pci: Fix default runtime and system PM levels
scsi: ufs: ufs-pci: Fix hibernate state transition for Intel MTL-like host controllers
scsi: ufs: host: mediatek: Support FDE (AES) clock scaling
scsi: ufs: host: mediatek: Support clock scaling with Vcore binding
...
Diffstat (limited to 'drivers/ufs')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/ufs/core/ufs-sysfs.c | 3 |
| -rw-r--r-- | drivers/ufs/core/ufshcd.c | 105 |
| -rw-r--r-- | drivers/ufs/host/ufs-mediatek.c | 330 |
| -rw-r--r-- | drivers/ufs/host/ufs-mediatek.h | 32 |
| -rw-r--r-- | drivers/ufs/host/ufs-qcom.c | 8 |
| -rw-r--r-- | drivers/ufs/host/ufshcd-pci.c | 33 |
6 files changed, 389 insertions, 122 deletions
```diff
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 00948378a719..4bd7d491e3c5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -5,6 +5,7 @@
 #include <linux/string.h>
 #include <linux/bitfield.h>
 #include <linux/unaligned.h>
+#include <linux/string_choices.h>
 #include <ufs/ufs.h>
 #include <ufs/unipro.h>
@@ -1516,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev,	\
         ret = -EINVAL;	\
         goto out;	\
     }	\
-    ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false");	\
+    ret = sysfs_emit(buf, "%s\n", str_true_false(flag));	\
 out:	\
     up(&hba->host_sem);	\
     return ret;	\
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index fd8015ed36a4..96ad57c3144b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -364,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
 
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+    u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+    u32 new_val = old_val | intrs;
+
+    if (new_val != old_val)
+        ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+    u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+    u32 new_val = old_val & ~intrs;
+
+    if (new_val != old_val)
+        ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
 static void ufshcd_configure_wb(struct ufs_hba *hba)
 {
     if (!ufshcd_is_wb_allowed(hba))
@@ -2596,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  */
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
+    unsigned long flags;
     int ret;
 
     if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2605,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
     mutex_lock(&hba->uic_cmd_mutex);
     ufshcd_add_delay_before_dme_cmd(hba);
 
+    spin_lock_irqsave(hba->host->host_lock, flags);
+    ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+    spin_unlock_irqrestore(hba->host->host_lock, flags);
+
     ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
     if (!ret)
         ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2682,32 +2715,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
-    u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
-    set |= intrs;
-    ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
-    u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
-    set &= ~intrs;
-    ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
  * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
  * descriptor according to request
  * @hba: per adapter instance
@@ -4318,7 +4325,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
     unsigned long flags;
     u8 status;
     int ret;
-    bool reenable_intr = false;
 
     mutex_lock(&hba->uic_cmd_mutex);
     ufshcd_add_delay_before_dme_cmd(hba);
@@ -4329,15 +4335,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
         goto out_unlock;
     }
     hba->uic_async_done = &uic_async_done;
-    if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
-        ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
-        /*
-         * Make sure UIC command completion interrupt is disabled before
-         * issuing UIC command.
-         */
-        ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-        reenable_intr = true;
-    }
+    ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
     spin_unlock_irqrestore(hba->host->host_lock, flags);
     ret = __ufshcd_send_uic_cmd(hba, cmd);
     if (ret) {
@@ -4381,9 +4379,7 @@ out:
     spin_lock_irqsave(hba->host->host_lock, flags);
     hba->active_uic_cmd = NULL;
     hba->uic_async_done = NULL;
-    if (reenable_intr)
-        ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
-    if (ret) {
+    if (ret && !hba->pm_op_in_progress) {
         ufshcd_set_link_broken(hba);
         ufshcd_schedule_eh_work(hba);
     }
@@ -4391,6 +4387,14 @@ out_unlock:
     spin_unlock_irqrestore(hba->host->host_lock, flags);
     mutex_unlock(&hba->uic_cmd_mutex);
 
+    /*
+     * If the h8 exit fails during the runtime resume process, it becomes
+     * stuck and cannot be recovered through the error handler. To fix
+     * this, use link recovery instead of the error handler.
+     */
+    if (ret && hba->pm_op_in_progress)
+        ret = ufshcd_link_recovery(hba);
+
     return ret;
 }
 
@@ -4405,28 +4409,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
     int ret;
 
+    if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+        uic_cmd->command != UIC_CMD_DME_SET)
+        return ufshcd_send_uic_cmd(hba, uic_cmd);
+
     if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
         return 0;
 
     ufshcd_hold(hba);
-
-    if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
-        uic_cmd->command == UIC_CMD_DME_SET) {
-        ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
-        goto out;
-    }
-
-    mutex_lock(&hba->uic_cmd_mutex);
-    ufshcd_add_delay_before_dme_cmd(hba);
-
-    ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
-    if (!ret)
-        ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
-    mutex_unlock(&hba->uic_cmd_mutex);
-
-out:
+    ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
     ufshcd_release(hba);
+
     return ret;
 }
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 182f58d0c9db..86ae73b89d4d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
 
 static const struct of_device_id ufs_mtk_of_match[] = {
     { .compatible = "mediatek,mt8183-ufshci" },
+    { .compatible = "mediatek,mt8195-ufshci" },
     {},
 };
 MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
@@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+    return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
 }
 
 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+    return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
 }
 
 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+    return host->caps & UFS_MTK_CAP_BROKEN_VCC;
 }
 
 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+    return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
 }
 
 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+    return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
 }
 
 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+    return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
 }
 
 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-    return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+    return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+    struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+    struct ufs_mtk_clk *mclk = &host->mclk;
+
+    return mclk->ufs_sel_clki &&
+           mclk->ufs_sel_max_clki &&
+           mclk->ufs_sel_min_clki;
 }
 
 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
         ufshcd_writel(hba,
                       ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
                       REG_UFS_XOUFS_CTRL);
+
+        /* DDR_EN setting */
+        if (host->ip_ver >= IP_VER_MT6989) {
+            ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+                        0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+        }
+
     }
 
     return 0;
@@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
     dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
 
-    ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+    /*
+     * If clock on timeout, assume clock is off, notify tfa do clock
+     * off setting.(keep DIFN disable, release resource)
+     * If clock off timeout, assume clock will off finally,
+     * set ref_clk_enabled directly.(keep DIFN disable, keep resource)
+     */
+    if (on)
+        ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+    else
+        host->ref_clk_enabled = false;
 
     return -ETIMEDOUT;
 
@@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
     if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
         host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
 
+    if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+        host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
     dev_info(hba->dev, "caps: 0x%x", host->caps);
 }
 
@@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
     return ret;
 }
 
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+    struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+    struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+    struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+    unsigned int nr = map->nr_queues;
+    unsigned int q_index;
+
+    q_index = map->mq_map[cpu];
+    if (q_index > nr) {
+        dev_err(hba->dev, "hwq index %d exceed %d\n",
+            q_index, nr);
+        return MTK_MCQ_INVALID_IRQ;
+    }
+
+    return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+    unsigned int irq, _cpu;
+    int ret;
+
+    irq = ufs_mtk_mcq_get_irq(hba, cpu);
+    if (irq == MTK_MCQ_INVALID_IRQ) {
+        dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+        return;
+    }
+
+    /* force migrate irq of cpu0 to cpu3 */
+    _cpu = (cpu == 0) ? 3 : cpu;
+    ret = irq_set_affinity(irq, cpumask_of(_cpu));
+    if (ret) {
+        dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+            irq, _cpu);
+        return;
+    }
+    dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+    bool is_legacy = false;
+
+    switch (hw_ip_ver) {
+    case IP_LEGACY_VER_MT6893:
+    case IP_LEGACY_VER_MT6781:
+        /* can add other legacy chipset ID here accordingly */
+        is_legacy = true;
+        break;
+    default:
+        break;
+    }
+    dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+    return is_legacy;
+}
+
+/*
+ * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since
+ * project MT6878. In order to perform correct version comparison,
+ * version number is changed by SW for the following projects.
+ * IP_VER_MT6983 0x00360000 to 0x10360000
+ * IP_VER_MT6897 0x01440000 to 0x10440000
+ * IP_VER_MT6989 0x01450000 to 0x10450000
+ * IP_VER_MT6991 0x01460000 to 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+    struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+    u32 hw_ip_ver;
+
+    hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+    if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+        ((hw_ip_ver & (0xFF << 24)) == 0)) {
+        hw_ip_ver &= ~(0xFF << 24);
+        hw_ip_ver |= (0x1 << 28);
+    }
+
+    host->ip_ver = hw_ip_ver;
+
+    host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
     struct list_head *head = &hba->clk_list_head;
-    struct ufs_mtk_clk *mclk = &host->mclk;
     struct ufs_clk_info *clki, *clki_tmp;
+    struct device *dev = hba->dev;
+    struct regulator *reg;
+    u32 volt;
 
     /*
      * Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
             host->mclk.ufs_sel_min_clki = clki;
             clk_disable_unprepare(clki->clk);
             list_del(&clki->list);
+        } else if (!strcmp(clki->name, "ufs_fde")) {
+            host->mclk.ufs_fde_clki = clki;
+        } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+            host->mclk.ufs_fde_max_clki = clki;
+            clk_disable_unprepare(clki->clk);
+            list_del(&clki->list);
+        } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+            host->mclk.ufs_fde_min_clki = clki;
+            clk_disable_unprepare(clki->clk);
+            list_del(&clki->list);
         }
     }
 
-    if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
-        !mclk->ufs_sel_min_clki) {
+    list_for_each_entry(clki, head, list) {
+        dev_info(hba->dev, "clk \"%s\" present", clki->name);
+    }
+
+    if (!ufs_mtk_is_clk_scale_ready(hba)) {
         hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
         dev_info(hba->dev,
                  "%s: Clk-scaling not ready. Feature disabled.",
                  __func__);
+        return;
+    }
+
+    /*
+     * Default get vcore if dts have these settings.
+     * No matter clock scaling support or not. (may disable by customer)
+     */
+    reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+    if (IS_ERR(reg)) {
+        dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+                 PTR_ERR(reg));
+        return;
+    }
+
+    if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+                             &volt)) {
+        dev_info(dev, "failed to get clk-scale-up-vcore-min");
+        return;
+    }
+
+    host->mclk.reg_vcore = reg;
+    host->mclk.vcore_volt = volt;
+
+    /* If default boot is max gear, request vcore */
+    if (reg && volt && host->clk_scale_up) {
+        if (regulator_set_voltage(reg, volt, INT_MAX)) {
+            dev_info(hba->dev,
+                     "Failed to set vcore to %d\n", volt);
+        }
     }
 }
 
@@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
 
     /* Enable clk scaling*/
     hba->caps |= UFSHCD_CAP_CLK_SCALING;
+    host->clk_scale_up = true; /* default is max freq */
 
     /* Set runtime pm delay to replace default */
     shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
 
     hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
     hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
-    hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+    if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+        hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
     hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
 
     if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
 
     ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
 
-    host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+    ufs_mtk_get_hw_ip_version(hba);
 
     goto out;
 
@@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
 {
     struct ufs_dev_info *dev_info = &hba->dev_info;
     u16 mid = dev_info->wmanufacturerid;
+    unsigned int cpu;
+
+    if (hba->mcq_enabled) {
+        /* Iterate all cpus to set affinity for mcq irqs */
+        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+            ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+    }
 
     if (mid == UFS_VENDOR_SAMSUNG) {
         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
     hba->vps->ondemand_data.downdifferential = 20;
 }
 
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
 {
     struct ufs_mtk_host *host = ufshcd_get_variant(hba);
     struct ufs_mtk_clk *mclk = &host->mclk;
     struct ufs_clk_info *clki = mclk->ufs_sel_clki;
-    int ret = 0;
+    struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+    struct regulator *reg;
+    int volt, ret = 0;
+    bool clk_bind_vcore = false;
+    bool clk_fde_scale = false;
+
+    if (!hba->clk_scaling.is_initialized)
+        return;
+
+    if (!clki || !fde_clki)
+        return;
+
+    reg = host->mclk.reg_vcore;
+    volt = host->mclk.vcore_volt;
+    if (reg && volt != 0)
+        clk_bind_vcore = true;
+
+    if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+        clk_fde_scale = true;
 
     ret = clk_prepare_enable(clki->clk);
     if (ret) {
@@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
         return;
     }
 
+    if (clk_fde_scale) {
+        ret = clk_prepare_enable(fde_clki->clk);
+        if (ret) {
+            dev_info(hba->dev,
+                     "fde clk_prepare_enable() fail, ret: %d\n", ret);
+            return;
+        }
+    }
+
     if (scale_up) {
+        if (clk_bind_vcore) {
+            ret = regulator_set_voltage(reg, volt, INT_MAX);
+            if (ret) {
+                dev_info(hba->dev,
+                         "Failed to set vcore to %d\n", volt);
+                goto out;
+            }
+        }
+
         ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
-        clki->curr_freq = clki->max_freq;
+        if (ret) {
+            dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+                     ret);
+        }
+
+        if (clk_fde_scale) {
+            ret = clk_set_parent(fde_clki->clk,
+                                 mclk->ufs_fde_max_clki->clk);
+            if (ret) {
+                dev_info(hba->dev,
+                         "Failed to set fde clk mux, ret = %d\n",
+                         ret);
+            }
+        }
     } else {
+        if (clk_fde_scale) {
+            ret = clk_set_parent(fde_clki->clk,
+                                 mclk->ufs_fde_min_clki->clk);
+            if (ret) {
+                dev_info(hba->dev,
+                         "Failed to set fde clk mux, ret = %d\n",
+                         ret);
+                goto out;
+            }
+        }
+
         ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
-        clki->curr_freq = clki->min_freq;
-    }
+        if (ret) {
+            dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+                     ret);
+            goto out;
+        }
 
-    if (ret) {
-        dev_info(hba->dev,
-                 "Failed to set ufs_sel_clki, ret: %d\n", ret);
+        if (clk_bind_vcore) {
+            ret = regulator_set_voltage(reg, 0, INT_MAX);
+            if (ret) {
+                dev_info(hba->dev,
+                         "failed to set vcore to MIN\n");
+            }
+        }
     }
 
+out:
     clk_disable_unprepare(clki->clk);
+
+    if (clk_fde_scale)
+        clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
+ * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
+ * Max and min clocks rate of ufs_sel defined in dts should match rate of
+ * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
+ * This prevent changing rate of pll clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+    struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+    struct ufs_mtk_clk *mclk = &host->mclk;
+    struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+    if (host->clk_scale_up == scale_up)
+        goto out;
+
+    if (scale_up)
+        _ufs_mtk_clk_scale(hba, true);
+    else
+        _ufs_mtk_clk_scale(hba, false);
+
+    host->clk_scale_up = scale_up;
+
+    /* Must always set before clk_set_rate() */
+    if (scale_up)
+        clki->curr_freq = clki->max_freq;
+    else
+        clki->curr_freq = clki->min_freq;
+out:
     trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
 }
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 05d76a6bd772..e46dc5fa209d 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -133,6 +133,8 @@ enum ufs_mtk_host_caps {
     UFS_MTK_CAP_DISABLE_MCQ = 1 << 8,
     /* Control MTCMOS with RTFF */
     UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9,
+
+    UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10,
 };
 
 struct ufs_mtk_crypt_cfg {
@@ -147,6 +149,11 @@ struct ufs_mtk_clk {
     struct ufs_clk_info *ufs_sel_clki; /* Mux */
     struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
     struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+    struct ufs_clk_info *ufs_fde_clki; /* Mux */
+    struct ufs_clk_info *ufs_fde_max_clki; /* Max src */
+    struct ufs_clk_info *ufs_fde_min_clki; /* Min src */
+    struct regulator *reg_vcore;
+    int vcore_volt;
 };
 
 struct ufs_mtk_hw_ver {
@@ -176,9 +183,11 @@ struct ufs_mtk_host {
     bool mphy_powered_on;
     bool unipro_lpm;
     bool ref_clk_enabled;
+    bool clk_scale_up;
     u16 ref_clk_ungating_wait_us;
     u16 ref_clk_gating_wait_us;
     u32 ip_ver;
+    bool legacy_ip_ver;
 
     bool mcq_set_intr;
     bool is_mcq_intr_enabled;
@@ -192,4 +201,27 @@ struct ufs_mtk_host {
 /* MTK RTT support number */
 #define MTK_MAX_NUM_RTT 2
 
+/* UFSHCI MTK ip version value */
+enum {
+    /* UFSHCI 3.1 */
+    IP_VER_MT6983 = 0x10360000,
+    IP_VER_MT6878 = 0x10420200,
+
+    /* UFSHCI 4.0 */
+    IP_VER_MT6897 = 0x10440000,
+    IP_VER_MT6989 = 0x10450000,
+    IP_VER_MT6899 = 0x10450100,
+    IP_VER_MT6991_A0 = 0x10460000,
+    IP_VER_MT6991_B0 = 0x10470000,
+    IP_VER_MT6993 = 0x10480000,
+
+    IP_VER_NONE = 0xFFFFFFFF
+};
+
+enum ip_ver_legacy {
+    IP_LEGACY_VER_MT6781 = 0x10380000,
+    IP_LEGACY_VER_MT6879 = 0x10360000,
+    IP_LEGACY_VER_MT6893 = 0x20160706
+};
+
 #endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 4bbe4de1679b..76fc70503a62 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1898,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
     return 0;
 }
 
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
                                           struct devfreq_dev_profile *p,
                                           struct devfreq_simple_ondemand_data *d)
@@ -1910,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
     hba->clk_scaling.suspend_on_no_request = true;
 }
 
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
-                                          struct devfreq_dev_profile *p,
-                                          struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
 
 /* Resources */
 static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 996387906aa1..b39239f641f2 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -22,17 +22,12 @@
 
 #define MAX_SUPP_MAC	64
 
-struct ufs_host {
-    void (*late_init)(struct ufs_hba *hba);
-};
-
 enum intel_ufs_dsm_func_id {
     INTEL_DSM_FNS = 0,
     INTEL_DSM_RESET = 1,
 };
 
 struct intel_host {
-    struct ufs_host ufs_host;
     u32 dsm_fns;
     u32 active_ltr;
     u32 idle_ltr;
@@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
     return ufs_intel_common_init(hba);
 }
 
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
 {
+    int err;
+
+    hba->nop_out_timeout = 200;
+    hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+    hba->caps |= UFSHCD_CAP_CRYPTO;
+    err = ufs_intel_common_init(hba);
     /* LKF always needs a full reset, so set PM accordingly */
     if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
         hba->spm_lvl = UFS_PM_LVL_6;
@@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
         hba->spm_lvl = UFS_PM_LVL_5;
         hba->rpm_lvl = UFS_PM_LVL_5;
     }
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
-    struct ufs_host *ufs_host;
-    int err;
-
-    hba->nop_out_timeout = 200;
-    hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
-    hba->caps |= UFSHCD_CAP_CRYPTO;
-    err = ufs_intel_common_init(hba);
-    ufs_host = ufshcd_get_variant(hba);
-    ufs_host->late_init = ufs_intel_lkf_late_init;
     return err;
 }
 
@@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
 
 static int ufs_intel_mtl_init(struct ufs_hba *hba)
 {
+    hba->rpm_lvl = UFS_PM_LVL_2;
+    hba->spm_lvl = UFS_PM_LVL_2;
     hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
     return ufs_intel_common_init(hba);
 }
@@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
 static int
 ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-    struct ufs_host *ufs_host;
     struct ufs_hba *hba;
     void __iomem *mmio_base;
     int err;
@@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         return err;
     }
 
-    ufs_host = ufshcd_get_variant(hba);
-    if (ufs_host && ufs_host->late_init)
-        ufs_host->late_init(hba);
-
     pm_runtime_put_noidle(&pdev->dev);
     pm_runtime_allow(&pdev->dev);
```
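The MediaTek hunks above renumber the hardware IP version so that registers reporting the old `01MMmmmm` (or `00MMmmmm`) layout compare correctly against the new `1MMMmmmm` layout. As a worked illustration of that masking logic only, here is a minimal standalone sketch; `normalize_ip_ver()` is a hypothetical helper written for this page, not a function in the kernel tree, and the sample values are taken from the comment block in the patch.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Restatement of the renumbering done by ufs_mtk_get_hw_ip_version():
 * if the top byte of the version register is 0x00 or 0x01 (old layout),
 * clear the top byte and set bit 28, yielding the 1MMMmmmm layout.
 * Values already in the new layout are left untouched.
 */
static uint32_t normalize_ip_ver(uint32_t hw_ip_ver)
{
	uint32_t top = hw_ip_ver & (0xFFu << 24);

	if (top == (0x1u << 24) || top == 0) {
		hw_ip_ver &= ~(0xFFu << 24);
		hw_ip_ver |= 0x1u << 28;
	}
	return hw_ip_ver;
}

int main(void)
{
	/* Sample values from the comment block in the patch. */
	const uint32_t samples[] = { 0x00360000, 0x01440000, 0x01450000,
				     0x01460000, 0x10480000 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> 0x%08x\n", samples[i],
		       normalize_ip_ver(samples[i]));
	return 0;
}
```

Running this prints `0x00360000 -> 0x10360000`, `0x01440000 -> 0x10440000`, and so on, matching the translation table in the patch comment, while a value already in the new layout such as `0x10480000` passes through unchanged; this is what lets comparisons like `host->ip_ver >= IP_VER_MT6989` work across both encodings.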