50 files changed, 1794 insertions, 452 deletions
diff --git a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml index 3e62b5d4da61..6e2edd43fc2a 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml @@ -8,8 +8,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: MediaTek Universal Flash Storage (UFS) M-PHY maintainers: - - Stanley Chu <stanley.chu@mediatek.com> - Chunfeng Yun <chunfeng.yun@mediatek.com> + - Peter Wang <peter.wang@mediatek.com> + - Chaotian Jing <chaotian.jing@mediatek.com> description: | UFS M-PHY nodes are defined to describe on-chip UFS M-PHY hardware macro. diff --git a/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml b/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml new file mode 100644 index 000000000000..c00ec342d574 --- /dev/null +++ b/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/ufs/amd,versal2-ufs.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: AMD Versal Gen 2 UFS Host Controller + +maintainers: + - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> + +allOf: + - $ref: ufs-common.yaml + +properties: + compatible: + const: amd,versal2-ufs + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + items: + - const: core + + power-domains: + maxItems: 1 + + resets: + maxItems: 2 + + reset-names: + items: + - const: host + - const: phy + +required: + - reg + - clocks + - clock-names + - resets + - reset-names + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + ufs@f10b0000 { + compatible = "amd,versal2-ufs"; + reg = <0xf10b0000 0x1000>; + clocks = <&ufs_core_clk>; + clock-names = "core"; + resets = <&scmi_reset 4>, <&scmi_reset 35>; + reset-names = "host", "phy"; + interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; + freq-table-hz = <0 0>; + }; diff --git a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml index 1dec54fb00f3..15c347f5e660 100644 --- a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml @@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Mediatek Universal Flash Storage (UFS) Controller maintainers: - - Stanley Chu <stanley.chu@mediatek.com> + - Peter Wang <peter.wang@mediatek.com> + - Chaotian Jing <chaotian.jing@mediatek.com> properties: compatible: diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml index 1dd41f6d5258..516bb61a4624 100644 --- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml @@ -88,7 +88,6 @@ allOf: - const: ice_core_clk reg: minItems: 2 - maxItems: 2 reg-names: minItems: 2 required: @@ -117,7 +116,6 @@ allOf: - const: tx_lane0_sync_clk - const: rx_lane0_sync_clk reg: - minItems: 1 maxItems: 1 reg-names: maxItems: 1 @@ -147,7 +145,6 @@ allOf: - const: ice_core_clk reg: minItems: 2 - maxItems: 2 reg-names: minItems: 2 required: diff --git a/MAINTAINERS b/MAINTAINERS index 46126ce2f968..ccd0f7d0c2bc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26339,6 +26339,13 @@ F: Documentation/devicetree/bindings/ufs/ F: Documentation/scsi/ufs.rst F: drivers/ufs/core/ +UNIVERSAL FLASH STORAGE HOST 
CONTROLLER DRIVER AMD VERSAL2 +M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> +M: Ajay Neeli <ajay.neeli@amd.com> +S: Maintained +F: Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml +F: drivers/ufs/host/ufs-amd-versal2.c + UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS M: Pedro Sousa <pedrom.sousa@synopsys.com> L: linux-scsi@vger.kernel.org @@ -26355,6 +26362,7 @@ F: drivers/ufs/host/ufs-exynos* UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS M: Peter Wang <peter.wang@mediatek.com> +M: Chaotian Jing <chaotian.jing@mediatek.com> R: Stanley Jhu <chu.stanley@gmail.com> L: linux-scsi@vger.kernel.org L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index f2140fc06ba0..15e18d50dcc6 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev) } /** + * ata_acpi_dev_manage_restart - if the disk should be stopped (spun down) on + * system restart. + * @dev: target ATA device + * + * RETURNS: + * true if the disk should be stopped, otherwise false. + */ +bool ata_acpi_dev_manage_restart(struct ata_device *dev) +{ + struct device *tdev; + + /* + * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the + * ata_device instead of the ata_port. + */ + if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA) + tdev = &dev->tdev; + else + tdev = &dev->link->ap->tdev; + + if (!is_acpi_device_node(tdev->fwnode)) + return false; + return acpi_bus_power_manageable(ACPI_HANDLE(tdev)); +} + +/** + * ata_acpi_port_power_on - set the power state of the ata port to D0 + * @ap: target ATA port + * + * This function is called at the beginning of ata_port_probe(). + */ +void ata_acpi_port_power_on(struct ata_port *ap) +{ + acpi_handle handle; + int i; + + /* + * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the + * ata_device instead of the ata_port. 
+ */ + if (ap->flags & ATA_FLAG_ACPI_SATA) { + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->link.device[i]; + + if (!is_acpi_device_node(dev->tdev.fwnode)) + continue; + handle = ACPI_HANDLE(&dev->tdev); + if (!acpi_bus_power_manageable(handle)) + continue; + if (acpi_bus_set_power(handle, ACPI_STATE_D0)) + ata_dev_err(dev, + "acpi: failed to set power state to D0\n"); + } + return; + } + + if (!is_acpi_device_node(ap->tdev.fwnode)) + return; + handle = ACPI_HANDLE(&ap->tdev); + if (!acpi_bus_power_manageable(handle)) + return; + + if (acpi_bus_set_power(handle, ACPI_STATE_D0)) + ata_port_err(ap, "acpi: failed to set power state to D0\n"); +} + +/** * ata_acpi_dissociate - dissociate ATA host from ACPI objects * @host: target ATA host * diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ff53f5f029b4..6ce0b07e295d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5904,6 +5904,8 @@ void ata_port_probe(struct ata_port *ap) struct ata_eh_info *ehi = &ap->link.eh_info; unsigned long flags; + ata_acpi_port_power_on(ap); + /* kick EH for boot probing */ spin_lock_irqsave(ap->lock, flags); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index b43a3196e2be..026122bb6f2f 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1095,6 +1095,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, */ sdev->manage_runtime_start_stop = 1; sdev->manage_shutdown = 1; + sdev->manage_restart = ata_acpi_dev_manage_restart(dev); sdev->force_runtime_start_on_system_start = 1; } diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index e5b977a8d3e1..0e7ecac73680 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -130,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev); extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state); extern void ata_acpi_bind_port(struct ata_port *ap); extern void ata_acpi_bind_dev(struct ata_device *dev); +extern void ata_acpi_port_power_on(struct ata_port *ap); +extern bool ata_acpi_dev_manage_restart(struct ata_device *dev); extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev); #else static inline void ata_acpi_dissociate(struct ata_host *host) { } @@ -140,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap, pm_message_t state) { } static inline void ata_acpi_bind_port(struct ata_port *ap) {} static inline void ata_acpi_bind_dev(struct ata_device *dev) {} +static inline void ata_acpi_port_power_on(struct ata_port *ap) {} +static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return 0; } #endif /* libata-scsi.c */ diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile index 875a53703c82..70f8f02f14a3 100644 --- a/drivers/firmware/xilinx/Makefile +++ b/drivers/firmware/xilinx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Xilinx firmwares -obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o +obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c new file mode 100644 index 000000000000..85da8a822f3a --- /dev/null +++ b/drivers/firmware/xilinx/zynqmp-ufs.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Firmware Layer for UFS APIs + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. 
+ */ + +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/module.h> + +/* Register Node IDs */ +#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */ +#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */ + +/* Register Offsets for PMC IOU SLCR */ +#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */ +#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */ + +/* Masks for SRAM Control and Status Register */ +#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */ +#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */ +#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */ + +/* Mask to check M-PHY TX-RX configuration readiness */ +#define TX_RX_CFG_RDY_MASK GENMASK(3, 0) + +/* Register Offsets for EFUSE Cache */ +#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */ + +/** + * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness + * @is_ready: Store output status (true/false) + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) +{ + u32 regval; + int ret; + + if (!is_ready) + return -EINVAL; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, ®val); + if (ret) + return ret; + + regval &= TX_RX_CFG_RDY_MASK; + if (regval) + *is_ready = true; + else + *is_ready = false; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready); + +/** + * zynqmp_pm_is_sram_init_done - check SRAM initialization + * @is_done: Store output status (true/false) + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_is_sram_init_done(bool *is_done) +{ + u32 regval; + int ret; + + if (!is_done) + return -EINVAL; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, ®val); + if (ret) + return ret; + + regval &= SRAM_CSR_INIT_DONE_MASK; + if (regval) + *is_done = true; + else + *is_done = false; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done); + +/** + * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_set_sram_bypass(void) +{ + u32 sram_csr; + int ret; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr); + if (ret) + return ret; + + sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK; + sram_csr |= SRAM_CSR_BYPASS_MASK; + + return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, + GENMASK(2, 1), sram_csr); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass); + +/** + * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values + * @val: Store the calibration value + * + * Return: Returns 0 on success or error value on failure. 
+ */ +int zynqmp_pm_get_ufs_calibration_values(u32 *val) +{ + return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 02da3e48bc8f..b7cd0eca9eaa 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1617,6 +1617,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, } /** + * zynqmp_pm_sec_read_reg - PM call to securely read from given offset + * of the node + * @node_id: Node Id of the device + * @offset: Offset to be used (20-bit) + * @ret_value: Output data read from the given offset after + * firmware access policy is successfully enforced + * + * Return: Returns 0 on success or error value on failure + */ +int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + u32 count = 1; + int ret; + + if (!ret_value) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG, + offset, count); + + *ret_value = ret_payload[1]; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg); + +/** + * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset + * of the node + * @node_id: Node Id of the device + * @offset: Offset to be used (20-bit) + * @mask: Mask to be used + * @value: Value to be written + * + * Return: Returns 0 on success or error value on failure + */ +int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask, + u32 value) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG, + offset, mask, value); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg); + +/** * zynqmp_pm_set_sd_config - PM call to set value of SD config registers * @node: SD node ID * @config: The config type of SD registers diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 4912087de10d..c8c5dfb3ba9a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2438,7 +2438,7 @@ static int __init fcoe_init(void) unsigned int cpu; int rc = 0; - fcoe_wq = alloc_workqueue("fcoe", 0, 0); + fcoe_wq = alloc_workqueue("fcoe", WQ_PERCPU, 0); if (!fcoe_wq) return -ENOMEM; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 224edacf2d8e..689793d03c20 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -311,7 +311,6 @@ struct lpfc_defer_flogi_acc { u16 rx_id; u16 ox_id; struct lpfc_nodelist *ndlp; - }; #define LPFC_VMID_TIMER 300 /* timer interval in seconds */ @@ -634,6 +633,7 @@ struct lpfc_vport { #define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */ #define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */ #define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */ +#define FC_CT_RSPNI_PNI 0x40 /* RSPNI_PNI accepted by switch */ struct list_head fc_nodes; spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */ @@ -1078,6 +1078,8 @@ struct lpfc_hba { uint32_t nport_event_cnt; /* timestamp for nlplist entry */ + unsigned long pni; /* 64-bit Platform Name Identifier */ + uint8_t wwnn[8]; uint8_t wwpn[8]; uint32_t RandomData[7]; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index f93f8dca65bd..d3caac394291 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1743,6 +1743,28 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } static void +lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct 
lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport; + struct lpfc_dmabuf *outp; + struct lpfc_sli_ct_request *ctrsp; + u32 ulp_status; + + vport = cmdiocb->vport; + ulp_status = get_job_ulpstatus(phba, rspiocb); + + if (ulp_status == IOSTAT_SUCCESS) { + outp = cmdiocb->rsp_dmabuf; + ctrsp = (struct lpfc_sli_ct_request *)outp->virt; + if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) + vport->ct_flags |= FC_CT_RSPNI_PNI; + } + lpfc_cmpl_ct(phba, cmdiocb, rspiocb); +} + +static void lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { @@ -1956,6 +1978,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSNN_NN) bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RSPNI_PNI) + bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ; else if (cmdcode == SLI_CTNS_DA_ID) bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFF_ID) @@ -2077,6 +2101,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, CtReq->un.rsnn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; break; + case SLI_CTNS_RSPNI_PNI: + vport->ct_flags &= ~FC_CT_RSPNI_PNI; + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_RSPNI_PNI); + CtReq->un.rspni.pni = cpu_to_be64(phba->pni); + scnprintf(CtReq->un.rspni.symbname, + sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s", + phba->os_host_name); + CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname, + sizeof(CtReq->un.rspni.symbname)); + cmpl = lpfc_cmpl_ct_cmd_rspni_pni; + break; case SLI_CTNS_DA_ID: /* Implement DA_ID Nameserver request */ CtReq->CommandResponse.bits.CmdRsp = diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 3d47dc7458d1..51cb8571c049 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -208,6 +208,7 @@ enum lpfc_nlp_flag { NPR list */ NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_FLOGI_DFR_ACC = 28, /* FLOGI LS_ACC was Deferred */ NLP_SC_REQ = 29, /* Target requires authentication */ NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b71db7d7d747..02b6d31b9ad9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -650,8 +650,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; - if (sp->cls4.classValid) - ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; @@ -934,10 +932,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { /* One additional decrement on node reference count to - * trigger the release of the node + * trigger the release of the node. Make sure the ndlp + * is marked NLP_DROPPED. */ - if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); + } goto out; } @@ -995,9 +998,10 @@ stop_rr_fcf_flogi: IOERR_LOOP_OPEN_FAILURE))) lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, "2858 FLOGI Status:x%x/x%x TMO" - ":x%x Data x%lx x%x\n", + ":x%x Data x%lx x%x x%lx x%x\n", ulp_status, ulp_word4, tmo, - phba->hba_flag, phba->fcf.fcf_flag); + phba->hba_flag, phba->fcf.fcf_flag, + ndlp->nlp_flag, ndlp->fc4_xpt_flags); /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { @@ -1015,14 +1019,17 @@ stop_rr_fcf_flogi: * reference to trigger node release. */ if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); + } lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, "0150 FLOGI Status:x%x/x%x " - "xri x%x TMO:x%x refcnt %d\n", + "xri x%x iotag x%x TMO:x%x refcnt %d\n", ulp_status, ulp_word4, cmdiocb->sli4_xritag, - tmo, kref_read(&ndlp->kref)); + cmdiocb->iotag, tmo, kref_read(&ndlp->kref)); /* If this is not a loop open failure, bail out */ if (!(ulp_status == IOSTAT_LOCAL_REJECT && @@ -1279,6 +1286,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t tmo, did; int rc; + /* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked + * NLP_DROPPED. This happens when the FLOGI completed with the XB bit + * set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is + * issued. The time window for the XRI_ABORTED CQE can be as much as + * 2*2*RA_TOV allowing for ndlp reuse of this type when the link is + * cycling quickly. When true, restore the initial reference and remove + * the NLP_DROPPED flag as lpfc is retrying. 
+ */ + if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) { + if (!lpfc_nlp_get(ndlp)) + return 1; + } + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_FLOGI); @@ -1334,6 +1354,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Can't do SLI4 class2 without support sequence coalescing */ sp->cls2.classValid = 0; sp->cls2.seqDelivery = 0; + + /* Fill out Auxiliary Parameter Data */ + if (phba->pni) { + sp->aux.flags = + AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID; + sp->aux.pni = cpu_to_be64(phba->pni); + sp->aux.npiv_cnt = cpu_to_be16(phba->max_vpi - 1); + } } else { /* Historical, setting sequential-delivery bit for SLI3 */ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; @@ -1413,11 +1441,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->defer_flogi_acc.ox_id; } - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "3354 Xmit deferred FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%lx\n", - phba->defer_flogi_acc.rx_id, - phba->defer_flogi_acc.ox_id, phba->hba_flag); + /* The LS_ACC completion needs to drop the initial reference. + * This is a special case for Pt2Pt because both FLOGIs need + * to complete and lpfc defers the LS_ACC when the remote + * FLOGI arrives before the driver's FLOGI. + */ + set_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag); /* Send deferred FLOGI ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, @@ -1433,6 +1462,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->defer_flogi_acc.ndlp = NULL; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3354 Xmit deferred FLOGI ACC: rx_id: x%x," + " ox_id: x%x, ndlp x%px hba_flag x%lx\n", + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, + phba->defer_flogi_acc.ndlp, + phba->hba_flag); + vport->fc_myDID = did; } @@ -2248,7 +2285,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); - sp->cmn.bbRcvSizeMsb &= 0xF; + if (!test_bit(FC_PT2PT, &vport->fc_flag)) + sp->cmn.bbRcvSizeMsb &= 0xF; /* Check if the destination port supports VMID */ ndlp->vmid_support = 0; @@ -2367,7 +2405,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, mode = KERN_INFO; /* Warn PRLI status */ - lpfc_printf_vlog(vport, mode, LOG_ELS, + lpfc_vlog_msg(vport, mode, LOG_ELS, "2754 PRLI DID:%06X Status:x%x/x%x, " "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, @@ -3024,6 +3062,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_DID, ulp_status, ulp_word4); + /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */ if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) skip_recovery = 1; } @@ -3262,7 +3301,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) return -ENOMEM; } rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, - (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); + (u8 *)&ns_ndlp->fc_sparam, mbox, fc_ndlp->nlp_rpi); if (rc) { rc = -EACCES; goto out; @@ -3306,7 +3345,8 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) * * This routine is a generic completion callback function for Discovery ELS cmd. * Currently used by the ELS command issuing routines for the ELS State Change - * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 
+ * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC), + * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf(). * These commands will be retried once only for ELS timeout errors. **/ static void @@ -3379,11 +3419,21 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); return; } + if (ulp_status) { /* ELS discovery cmd completes with error */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, "4203 ELS cmd x%x error: x%x x%X\n", cmd, ulp_status, ulp_word4); + + /* In the case where the ELS cmd completes with an error and + * the node does not have RPI registered, the node is + * outstanding and should put its initial reference. + */ + if ((cmd == ELS_CMD_SCR || cmd == ELS_CMD_RDF) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && + !test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + lpfc_nlp_put(ndlp); goto out; } @@ -3452,6 +3502,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) uint8_t *pcmd; uint16_t cmdsize; struct lpfc_nodelist *ndlp; + bool node_created = false; cmdsize = (sizeof(uint32_t) + sizeof(SCR)); @@ -3461,21 +3512,21 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) if (!ndlp) return 1; lpfc_enqueue_node(vport, ndlp); + node_created = true; } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_SCR); if (!elsiocb) - return 1; + goto out_node_created; if (phba->sli_rev == LPFC_SLI_REV4) { rc = lpfc_reg_fab_ctrl_node(vport, ndlp); if (rc) { - lpfc_els_free_iocb(phba, elsiocb); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0937 %s: Failed to reg fc node, rc %d\n", __func__, rc); - return 1; + goto out_free_iocb; } } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; @@ -3494,23 +3545,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) phba->fc_stat.elsXmitSCR++; elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); - if (!elsiocb->ndlp) { - lpfc_els_free_iocb(phba, elsiocb); - return 1; - } + if (!elsiocb->ndlp) + goto out_free_iocb; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue SCR: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); - if (rc == IOCB_ERROR) { - lpfc_els_free_iocb(phba, elsiocb); - lpfc_nlp_put(ndlp); - return 1; - } + if (rc == IOCB_ERROR) + goto out_iocb_error; return 0; + +out_iocb_error: + lpfc_nlp_put(ndlp); +out_free_iocb: + lpfc_els_free_iocb(phba, elsiocb); +out_node_created: + if (node_created) + lpfc_nlp_put(ndlp); + return 1; } /** @@ -3597,8 +3652,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue RSCN: did:x%x", - ndlp->nlp_DID, 0, 0); + "Issue RSCN: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { @@ -3705,10 +3760,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) lpfc_nlp_put(ndlp); return 1; } - /* This will cause the callback-function lpfc_cmpl_els_cmd to - * trigger the release of the node. 
- */ - /* Don't release reference count as RDF is likely outstanding */ + return 0; } @@ -3726,7 +3778,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) * * Return code * 0 - Successfully issued rdf command - * 1 - Failed to issue rdf command + * < 0 - Failed to issue rdf command + * -EACCES - RDF not required for NPIV_PORT + * -ENODEV - No fabric controller device available + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + * **/ int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) @@ -3737,25 +3794,30 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) struct lpfc_nodelist *ndlp; uint16_t cmdsize; int rc; + bool node_created = false; + int err; cmdsize = sizeof(*prdf); + /* RDF ELS is not required on an NPIV VN_Port. */ + if (vport->port_type == LPFC_NPIV_PORT) + return -EACCES; + ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); if (!ndlp) { ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); if (!ndlp) return -ENODEV; lpfc_enqueue_node(vport, ndlp); + node_created = true; } - /* RDF ELS is not required on an NPIV VN_Port. */ - if (vport->port_type == LPFC_NPIV_PORT) - return -EACCES; - elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RDF); - if (!elsiocb) - return -ENOMEM; + if (!elsiocb) { + err = -ENOMEM; + goto out_node_created; + } /* Configure the payload for the supported FPIN events. */ prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; @@ -3781,8 +3843,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { - lpfc_els_free_iocb(phba, elsiocb); - return -EIO; + err = -EIO; + goto out_free_iocb; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, @@ -3791,11 +3853,19 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { - lpfc_els_free_iocb(phba, elsiocb); - lpfc_nlp_put(ndlp); - return -EIO; + err = -EIO; + goto out_iocb_error; } return 0; + +out_iocb_error: + lpfc_nlp_put(ndlp); +out_free_iocb: + lpfc_els_free_iocb(phba, elsiocb); +out_node_created: + if (node_created) + lpfc_nlp_put(ndlp); + return err; } /** @@ -3816,19 +3886,23 @@ static int lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { + int rc; + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL); /* Send LS_ACC */ - if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { + if (rc) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, - "1623 Failed to RDF_ACC from x%x for x%x\n", - ndlp->nlp_DID, vport->fc_myDID); + "1623 Failed to RDF_ACC from x%x for x%x Data: %d\n", + ndlp->nlp_DID, vport->fc_myDID, rc); return -EIO; } + rc = lpfc_issue_els_rdf(vport, 0); /* Issue new RDF for reregistering */ - if (lpfc_issue_els_rdf(vport, 0)) { + if (rc) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, - "2623 Failed to re register RDF for x%x\n", - vport->fc_myDID); + "2623 Failed to re register RDF for x%x Data: %d\n", + vport->fc_myDID, rc); return -EIO; } @@ -4299,7 +4373,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following - * lpfc_els_free_iocb routine to trigger the rlease of + * lpfc_els_free_iocb routine to trigger the release of * the node. 
*/ lpfc_els_free_iocb(phba, elsiocb); @@ -5127,7 +5201,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) { struct lpfc_dmabuf *buf_ptr, *buf_ptr1; - /* The I/O iocb is complete. Clear the node and first dmbuf */ + /* The I/O iocb is complete. Clear the node and first dmabuf */ elsiocb->ndlp = NULL; /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ @@ -5160,14 +5234,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = elsiocb->cmd_dmabuf; lpfc_els_free_data(phba, buf_ptr1); - elsiocb->cmd_dmabuf = NULL; } } if (elsiocb->bpl_dmabuf) { buf_ptr = elsiocb->bpl_dmabuf; lpfc_els_free_bpl(phba, buf_ptr); - elsiocb->bpl_dmabuf = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; @@ -5305,11 +5377,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; LPFC_MBOXQ_t *mbox = NULL; u32 ulp_status, ulp_word4, tmo, did, iotag; + u32 cmd; if (!vport) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "3177 null vport in ELS rsp\n"); - goto out; + goto release; } if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -5419,7 +5492,7 @@ out: * these conditions because it doesn't need the login. */ if (phba->sli_rev == LPFC_SLI_REV4 && - vport && vport->port_type == LPFC_NPIV_PORT && + vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && @@ -5435,6 +5508,27 @@ out: } } + /* The driver's unsolicited deferred FLOGI ACC in Pt2Pt needs to + * release the initial reference because the put after the free_iocb + * call removes only the reference from the defer logic. This FLOGI + * is never registered with the SCSI transport. + */ + if (test_bit(FC_PT2PT, &vport->fc_flag) && + test_and_clear_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag)) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "3357 Pt2Pt Defer FLOGI ACC ndlp x%px, " + "nflags x%lx, fc_flag x%lx\n", + ndlp, ndlp->nlp_flag, + vport->fc_flag); + cmd = *((u32 *)cmdiocb->cmd_dmabuf->virt); + if (cmd == ELS_CMD_ACC) { + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + lpfc_nlp_put(ndlp); + } + } + +release: /* Release the originating I/O reference. */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); @@ -5569,7 +5663,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, sp->cls1.classValid = 0; sp->cls2.classValid = 0; sp->cls3.classValid = 0; - sp->cls4.classValid = 0; /* Copy our worldwide names */ memcpy(&sp->portName, &vport->fc_sparam.portName, @@ -5583,7 +5676,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); - sp->cmn.bbRcvSizeMsb &= 0xF; + if (!test_bit(FC_PT2PT, &vport->fc_flag)) + sp->cmn.bbRcvSizeMsb &= 0xF; /* If our firmware supports this feature, convey that * info to the target using the vendor specific field. 
@@ -8402,13 +8496,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, &wqe->xmit_els_rsp.wqe_com); vport->fc_myDID = did; - - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "3344 Deferring FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%lx\n", - phba->defer_flogi_acc.rx_id, - phba->defer_flogi_acc.ox_id, phba->hba_flag); - phba->defer_flogi_acc.flag = true; /* This nlp_get is paired with nlp_puts that reset the @@ -8417,6 +8504,14 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * processed or cancelled. */ phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3344 Deferring FLOGI ACC: rx_id: x%x," + " ox_id: x%x, ndlp x%px, hba_flag x%lx\n", + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, + phba->defer_flogi_acc.ndlp, + phba->hba_flag); return 0; } @@ -8734,7 +8829,7 @@ reject_out: * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * - * This routine processes Read Timout Value (RTV) IOCB received as an + * This routine processes Read Timeout Value (RTV) IOCB received as an * ELS unsolicited event. It first checks the remote port state. If the * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE * state, it invokes the lpfc_els_rsl_reject() routine to send the reject @@ -10357,11 +10452,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { - if (newnode) - lpfc_nlp_put(ndlp); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) goto dropit; - } elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10843,7 +10935,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); /* * The different unsolicited event handlers would tell us - * if they are done with "mp" by setting cmd_dmabuf to NULL. + * if they are done with "mp" by setting cmd_dmabuf/bpl_dmabuf to NULL. 
*/ if (elsiocb->cmd_dmabuf) { lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); @@ -11423,6 +11515,13 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sp->cls2.seqDelivery = 1; sp->cls3.seqDelivery = 1; + /* Fill out Auxiliary Parameter Data */ + if (phba->pni) { + sp->aux.flags = + AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID; + sp->aux.pni = cpu_to_be64(phba->pni); + } + pcmd += sizeof(uint32_t); /* CSP Word 2 */ pcmd += sizeof(uint32_t); /* CSP Word 3 */ pcmd += sizeof(uint32_t); /* CSP Word 4 */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 43d246c5c049..bb803f32bc1b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -424,6 +424,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) { + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " @@ -566,7 +567,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } - lpfc_nlp_put(ndlp); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + lpfc_nlp_put(ndlp); return fcf_inuse; } @@ -4371,6 +4373,8 @@ out: lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); + if (phba->pni) + lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 3bc0efa7453e..b2e353590ebb 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -168,6 +168,11 @@ struct lpfc_sli_ct_request { uint8_t len; uint8_t symbname[255]; } rspn; + struct rspni { /* For RSPNI_PNI requests */ + __be64 pni; + u8 len; + u8 symbname[255]; + } rspni; struct gff { uint32_t PortId; } gff; @@ -213,6 +218,8 @@ struct lpfc_sli_ct_request { sizeof(struct da_id)) #define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct rspn)) +#define RSPNI_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rspni)) /* * FsType Definitions @@ -309,6 +316,7 @@ struct lpfc_sli_ct_request { #define SLI_CTNS_RIP_NN 0x0235 #define SLI_CTNS_RIPA_NN 0x0236 #define SLI_CTNS_RSNN_NN 0x0239 +#define SLI_CTNS_RSPNI_PNI 0x0240 #define SLI_CTNS_DA_ID 0x0300 /* @@ -512,6 +520,21 @@ struct class_parms { uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */ }; +enum aux_parm_flags { + AUX_PARM_PNI_VALID = 0x20, /* FC Word 0, bit 29 */ + AUX_PARM_DATA_VALID = 0x40, /* FC Word 0, bit 30 */ +}; + +struct aux_parm { + u8 flags; /* FC Word 0, bit 31:24 */ + u8 ext_feat[3]; /* FC Word 0, bit 23:0 */ + + __be64 pni; /* FC Word 1 and 2, platform name identifier */ + + __be16 rsvd; /* FC Word 3, bit 31:16 */ + __be16 npiv_cnt; /* FC Word 3, bit 15:0 */ +} __packed; + struct serv_parm { /* Structure is in Big Endian format */ struct csp cmn; struct lpfc_name portName; @@ -519,7 +542,7 @@ struct serv_parm { /* Structure is in Big Endian format */ struct class_parms cls1; struct class_parms cls2; struct class_parms cls3; - struct class_parms cls4; + struct aux_parm aux; union { uint8_t vendorVersion[16]; struct { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f206267d9ecd..a9c6dbe3b465 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ 
-3057,12 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_vmid_vport_cleanup(vport); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_DID == Fabric_Cntl_DID && - ndlp->nlp_state == NLP_STE_UNUSED_NODE) { - lpfc_nlp_put(ndlp); - continue; - } - /* Fabric Ports not in UNMAPPED state are cleaned up in the * DEVICE_RM event. */ @@ -9082,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport) vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; } - lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "6077 Setup FDMI mask: hba x%x port x%x\n", - vport->fdmi_hba_mask, vport->fdmi_port_mask); + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6077 Setup FDMI mask: hba x%x port x%x\n", + vport->fdmi_hba_mask, vport->fdmi_port_mask); } /** diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 1e5ef93e67e3..8240d59f4120 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -432,8 +432,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; - if (sp->cls4.classValid) - ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; /* if already logged in, do implicit logout */ @@ -452,18 +450,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (!(ndlp->nlp_type & NLP_FABRIC) && !(phba->nvmet_support)) { - /* Clear ndlp info, since follow up PRLI may have - * updated ndlp information - */ - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); - ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); - ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); - - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, - ndlp, NULL); - return 1; + break; } if (nlp_portwwn != 0 && nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) @@ -485,7 +472,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); break; } - + /* Clear ndlp info, since follow up processes may have + * updated ndlp information + */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; @@ -1426,8 +1415,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; - if (sp->cls4.classValid) - ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7ea7c4245c69..73d77cfab5f8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -27,6 +27,8 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/lockdep.h> +#include <linux/dmi.h> +#include <linux/of.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -8447,6 +8449,70 @@ lpfc_set_host_tm(struct lpfc_hba *phba) } /** + * lpfc_get_platform_uuid - Attempts to extract a platform uuid + * @phba: pointer to lpfc hba data structure. + * + * This routine attempts to first read SMBIOS DMI data for the System + * Information structure offset 08h called System UUID. Else, no platform + * UUID will be advertised. 
+ **/ +static void +lpfc_get_platform_uuid(struct lpfc_hba *phba) +{ + int rc; + const char *uuid; + char pni[17] = {0}; /* 16 characters + '\0' */ + bool is_ff = true, is_00 = true; + u8 i; + + /* First attempt SMBIOS DMI */ + uuid = dmi_get_system_info(DMI_PRODUCT_UUID); + if (uuid) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2088 SMBIOS UUID %s\n", + uuid); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2099 Could not extract UUID\n"); + } + + if (uuid && uuid_is_valid(uuid)) { + /* Generate PNI from UUID format. + * + * 1.) Extract lower 64 bits from UUID format. + * 2.) Set 3h for NAA Locally Assigned Name Identifier format. + * + * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy + * + * extract the yyyy-yyyyyyyyyyyy portion + * final PNI 3yyyyyyyyyyyyyyy + */ + scnprintf(pni, sizeof(pni), "3%c%c%c%s", + uuid[20], uuid[21], uuid[22], &uuid[24]); + + /* Sanitize the converted PNI */ + for (i = 1; i < 16 && (is_ff || is_00); i++) { + if (pni[i] != '0') + is_00 = false; + if (pni[i] != 'f' && pni[i] != 'F') + is_ff = false; + } + + /* Convert from char* to unsigned long */ + rc = kstrtoul(pni, 16, &phba->pni); + if (!rc && !is_ff && !is_00) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2100 PNI 0x%016lx\n", phba->pni); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2101 PNI %s generation status %d\n", + pni, rc); + phba->pni = 0; + } + } +} + +/** * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. * @@ -8529,6 +8595,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) clear_bit(HBA_FCOE_MODE, &phba->hba_flag); } + /* Obtain platform UUID, only for SLI4 FC adapters */ + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) + lpfc_get_platform_uuid(phba); + if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == LPFC_DCBX_CEE_MODE) set_bit(HBA_FIP_SUPPORT, &phba->hba_flag); @@ -19858,13 +19928,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba) } /** - * lpfc_sli4_resume_rpi - Remove the rpi bitmask region + * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI * @ndlp: pointer to lpfc nodelist data structure. * @cmpl: completion call-back. * @iocbq: data to load as mbox ctx_u information * - * This routine is invoked to remove the memory region that - * provided rpi via a bitmask. + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, @@ -19894,7 +19966,6 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, return -EIO; } - /* Post all rpi memory regions to the port. */ lpfc_resume_rpi(mboxq, ndlp); if (cmpl) { mboxq->mbox_cmpl = cmpl; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 31c3c5abdca6..f3dada5bf7c1 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.11" +#define LPFC_DRIVER_VERSION "14.4.0.12" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c37992147847..99eb0a30df61 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -605,68 +605,6 @@ sdev_show_##field (struct device *dev, struct device_attribute *attr, \ sdev_show_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); - -/* - * sdev_rw_attr: create a function and attribute variable for a - * read/write field. - */ -#define sdev_rw_attr(field, format_string) \ - sdev_show_function(field, format_string) \ - \ -static ssize_t \ -sdev_store_##field (struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - struct scsi_device *sdev; \ - sdev = to_scsi_device(dev); \ - sscanf (buf, format_string, &sdev->field); \ - return count; \ -} \ -static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); - -/* Currently we don't export bit fields, but we might in future, - * so leave this code in */ -#if 0 -/* - * sdev_rd_attr: create a function and attribute variable for a - * read/write bit field. - */ -#define sdev_rw_attr_bit(field) \ - sdev_show_function(field, "%d\n") \ - \ -static ssize_t \ -sdev_store_##field (struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - int ret; \ - struct scsi_device *sdev; \ - ret = scsi_sdev_check_buf_bit(buf); \ - if (ret >= 0) { \ - sdev = to_scsi_device(dev); \ - sdev->field = ret; \ - ret = count; \ - } \ - return ret; \ -} \ -static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); - -/* - * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1", - * else return -EINVAL. - */ -static int scsi_sdev_check_buf_bit(const char *buf) -{ - if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { - if (buf[0] == '1') - return 1; - else if (buf[0] == '0') - return 0; - else - return -EINVAL; - } else - return -EINVAL; -} -#endif /* * Create the actual show/store functions and data structures. 
*/ @@ -710,10 +648,14 @@ static ssize_t sdev_store_timeout (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct scsi_device *sdev; - int timeout; - sdev = to_scsi_device(dev); - sscanf (buf, "%d\n", &timeout); + struct scsi_device *sdev = to_scsi_device(dev); + int ret, timeout; + + ret = kstrtoint(buf, 0, &timeout); + if (ret) + return ret; + if (timeout <= 0) + return -EINVAL; blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); return count; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 0252d3f6bed1..f2c0744b4480 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -318,6 +318,35 @@ static ssize_t manage_shutdown_store(struct device *dev, } static DEVICE_ATTR_RW(manage_shutdown); +static ssize_t manage_restart_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_restart); +} + +static ssize_t manage_restart_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_restart = v; + + return count; +} +static DEVICE_ATTR_RW(manage_restart); + static ssize_t allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -654,6 +683,7 @@ static struct attribute *sd_disk_attrs[] = { &dev_attr_manage_system_start_stop.attr, &dev_attr_manage_runtime_start_stop.attr, &dev_attr_manage_shutdown.attr, + &dev_attr_manage_restart.attr, &dev_attr_protection_type.attr, &dev_attr_protection_mode.attr, &dev_attr_app_tag_own.attr, @@ -4177,7 +4207,9 @@ static void sd_shutdown(struct device *dev) (system_state == SYSTEM_POWER_OFF && sdkp->device->manage_shutdown) || (system_state == SYSTEM_RUNNING && - sdkp->device->manage_runtime_start_stop)) { + sdkp->device->manage_runtime_start_stop) || + (system_state == SYSTEM_RESTART && + sdkp->device->manage_restart)) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b5e71ff26e8e..fe549e2b7c94 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -34,11 +34,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.34-035" +#define DRIVER_VERSION "2.1.36-026" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 34 -#define DRIVER_REVISION 35 +#define DRIVER_RELEASE 36 +#define DRIVER_REVISION 26 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -5555,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } +/* + * Adjust the timeout value for physical devices sent to the firmware + * by subtracting 3 seconds for timeouts greater than or equal to 8 seconds. + * + * This provides the firmware with additional time to attempt early recovery + * before the OS-level timeout occurs. + */ +#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? 
((tv) - 3) : (tv)) + static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, bool io_high_prio) { int rc; + u32 timeout; size_t cdb_length; struct pqi_io_request *io_request; struct pqi_raid_path_request *request; + struct request *rq; io_request = pqi_alloc_io_request(ctrl_info, scmd); if (!io_request) @@ -5634,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, return SCSI_MLQUEUE_HOST_BUSY; } + if (device->is_physical_device) { + rq = scsi_cmd_to_rq(scmd); + timeout = rq->timeout / HZ; + put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout); + } + pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); return 0; @@ -6410,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) { + unsigned long flags; int rc; mutex_lock(&ctrl_info->lun_reset_mutex); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) { + dev_warn(&ctrl_info->pci_dev->dev, + "skipping reset of scsi %d:%d:%d:%u, device has been removed\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + mutex_unlock(&ctrl_info->lun_reset_mutex); + return 0; + } + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + dev_err(&ctrl_info->pci_dev->dev, "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); @@ -6594,7 +6623,9 @@ static void pqi_sdev_destroy(struct scsi_device *sdev) { struct pqi_ctrl_info *ctrl_info; struct pqi_scsi_dev *device; + struct pqi_tmf_work *tmf_work; int mutex_acquired; + unsigned int lun; unsigned long flags; ctrl_info = shost_to_hba(sdev->host); @@ -6621,8 +6652,13 @@ static void pqi_sdev_destroy(struct scsi_device *sdev) mutex_unlock(&ctrl_info->scan_mutex); + for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) + cancel_work_sync(&tmf_work->work_struct); + + mutex_lock(&ctrl_info->lun_reset_mutex); pqi_dev_info(ctrl_info, "removed", device); pqi_free_device(device); + mutex_unlock(&ctrl_info->lun_reset_mutex); } static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) @@ -10111,6 +10147,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4840) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) }, { diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 74a6830b7ed8..168f25e4aaa3 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3526,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size) out: return result; } - +/* + * Handles any extra state needed for ioctls which are not st-specific. 
+ * Called with the scsi_tape lock held, released before return + */ +static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm, + struct file *file, unsigned int cmd_in, + unsigned long arg) +{ + int i, retval = 0; + + if (!STm->defined) { + retval = -ENXIO; + goto out; + } + + switch (cmd_in) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SCSI_IOCTL_GET_PCI: + break; + case SG_IO: + case SCSI_IOCTL_SEND_COMMAND: + case CDROM_SEND_PACKET: + if (!capable(CAP_SYS_RAWIO)) { + retval = -EPERM; + goto out; + } + fallthrough; + default: + if ((i = flush_buffer(STp, 0)) < 0) { + retval = i; + goto out; + } else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } + } + } + mutex_unlock(&STp->lock); + + retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, + cmd_in, (void __user *)arg); + if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { + /* unload */ + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } + + return retval; +out: + mutex_unlock(&STp->lock); + return retval; +} /* The ioctl command */ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) @@ -3565,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) if (retval) goto out; + switch (cmd_in) { + case MTIOCPOS: + case MTIOCGET: + case MTIOCTOP: + break; + default: + return st_common_ioctl(STp, STm, file, cmd_in, arg); + } + cmd_type = _IOC_TYPE(cmd_in); cmd_nr = _IOC_NR(cmd_in); @@ -3876,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) } mt_pos.mt_blkno = blk; retval = put_user_mtpos(p, &mt_pos); - goto out; - } - mutex_unlock(&STp->lock); - - switch (cmd_in) { - case SG_IO: - case SCSI_IOCTL_SEND_COMMAND: - case CDROM_SEND_PACKET: - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - break; - default: - break; } - - retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p); - if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { - /* unload */ - STp->rew_at_close = 0; - STp->ready = ST_NO_TAPE; - } - return retval; - out: mutex_unlock(&STp->lock); return retval; diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index d8ad02c29320..e02f28e5a104 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1844,6 +1844,7 @@ out_release_regions: out_scsi_host_put: scsi_host_put(host); out_disable: + unregister_reboot_notifier(&stex_notifier); pci_disable_device(pdev); return err; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b19acd662726..77da1fc82b8d 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data); DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); DEF_CONFIGFS_ATTRIB_SHOW(submit_type); +DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len); +DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment); +DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity); +DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary); +DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary); #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ static ssize_t _name##_store(struct config_item *item, const char *page,\ @@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len); CONFIGFS_ATTR(, alua_support); CONFIGFS_ATTR(, pgr_support); CONFIGFS_ATTR(, submit_type); +CONFIGFS_ATTR_RO(, atomic_max_len); +CONFIGFS_ATTR_RO(, atomic_alignment); +CONFIGFS_ATTR_RO(, 
atomic_granularity); +CONFIGFS_ATTR_RO(, atomic_max_with_boundary); +CONFIGFS_ATTR_RO(, atomic_max_boundary); /* * dev_attrib attributes for devices using the target core SBC/SPC @@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = { &attr_pgr_support, &attr_emulate_rsoc, &attr_submit_type, + &attr_atomic_alignment, + &attr_atomic_max_len, + &attr_atomic_granularity, + &attr_atomic_max_with_boundary, + &attr_atomic_max_boundary, NULL, }; EXPORT_SYMBOL(sbc_attrib_attrs); @@ -2758,33 +2773,24 @@ static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) { struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); - struct se_device *dev; - struct se_hba *hba; struct t10_alua_lu_gp_member *lu_gp_mem; - ssize_t len = 0, cur_len; - unsigned char buf[LU_GROUP_NAME_BUF] = { }; + const char *const end = page + PAGE_SIZE; + char *cur = page; spin_lock(&lu_gp->lu_gp_lock); list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { - dev = lu_gp_mem->lu_gp_mem_dev; - hba = dev->se_hba; + struct se_device *dev = lu_gp_mem->lu_gp_mem_dev; + struct se_hba *hba = dev->se_hba; - cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", + cur += scnprintf(cur, end - cur, "%s/%s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item)); - cur_len++; /* Extra byte for NULL terminator */ - - if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) { - pr_warn("Ran out of lu_gp_show_attr" - "_members buffer\n"); + if (WARN_ON_ONCE(cur >= end)) break; - } - memcpy(page+len, buf, cur_len); - len += cur_len; } spin_unlock(&lu_gp->lu_gp_lock); - return len; + return cur - page; } CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7bb711b24c0d..8ccb8541db1c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -814,6 +814,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT; + /* Skip allocating lun_stats since we can't export them. */ xcopy_lun = &dev->xcopy_lun; rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); init_completion(&xcopy_lun->lun_shutdown_comp); @@ -840,12 +841,29 @@ free_device: return NULL; } +void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib, + struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + int block_size = bdev_logical_block_size(bdev); + + if (!bdev_can_atomic_write(bdev)) + return; + + attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size; + attrib->atomic_granularity = attrib->atomic_alignment = + queue_atomic_write_unit_min_bytes(q) / block_size; + attrib->atomic_max_with_boundary = 0; + attrib->atomic_max_boundary = 0; +} +EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev); + /* * Check if the underlying struct block_device supports discard and if yes * configure the UNMAP parameters. 
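target_configure_write_atomic_from_bdev() above derives the SCSI atomic-write attributes from the block layer's byte-based queue limits by dividing by the logical block size. The same limits are visible from user space; a rough sketch, assuming the queue sysfs attribute names used by recent kernels (atomic_write_max_bytes, atomic_write_unit_min_bytes) and a device name of sda:

#include <stdio.h>

/* Read one numeric sysfs attribute from a block device's queue directory. */
static long read_queue_attr(const char *dev, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long block_size = read_queue_attr("sda", "logical_block_size");
	long max_bytes  = read_queue_attr("sda", "atomic_write_max_bytes");
	long unit_min   = read_queue_attr("sda", "atomic_write_unit_min_bytes");

	if (block_size <= 0 || max_bytes <= 0)
		return 1;
	/* Mirrors the attrib setup above: limits are advertised in logical blocks. */
	printf("atomic_max_len (blocks): %ld\n", max_bytes / block_size);
	printf("atomic_alignment/granularity (blocks): %ld\n", unit_min / block_size);
	return 0;
}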
*/ -bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, - struct block_device *bdev) +bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib, + struct block_device *bdev) { int block_size = bdev_logical_block_size(bdev); @@ -863,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, bdev_discard_alignment(bdev) / block_size; return true; } -EXPORT_SYMBOL(target_configure_unmap_from_queue); +EXPORT_SYMBOL(target_configure_unmap_from_bdev); /* * Convert from blocksize advertised to the initiator to the 512 byte diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 7156a4dc1ca7..13159928e365 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -697,7 +697,7 @@ static void target_fabric_port_release(struct config_item *item) struct se_lun *lun = container_of(to_config_group(item), struct se_lun, lun_group); - kfree_rcu(lun, rcu_head); + call_rcu(&lun->rcu_head, target_tpg_free_lun); } static struct configfs_item_operations target_fabric_port_item_ops = { diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 2d78ef74633c..b2610073e8cc 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev) struct inode *inode = file->f_mapping->host; if (S_ISBLK(inode->i_mode)) - return target_configure_unmap_from_queue(&dev->dev_attrib, - I_BDEV(inode)); + return target_configure_unmap_from_bdev(&dev->dev_attrib, + I_BDEV(inode)); /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */ dev->dev_attrib.max_unmap_lba_count = 0x2000; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 66c292b7d74b..8ec7b534ad76 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); - return target_configure_unmap_from_queue(&dev->dev_attrib, - ib_dev->ibd_bd); + return target_configure_unmap_from_bdev(&dev->dev_attrib, + ib_dev->ibd_bd); } static int iblock_configure_device(struct se_device *dev) @@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev) if (bdev_nonrot(bd)) dev->dev_attrib.is_nonrot = 1; + target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd); + bi = bdev_get_integrity(bd); if (!bi) return 0; @@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, else if (!bdev_write_cache(ib_dev->ibd_bd)) opf |= REQ_FUA; } + + if (cmd->se_cmd_flags & SCF_ATOMIC) + opf |= REQ_ATOMIC; } else { opf = REQ_OP_READ; miter_dir = SG_MITER_FROM_SG; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 20aab1f50565..763e6d26e187 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -125,6 +125,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *, struct se_lun *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64); +void target_tpg_free_lun(struct rcu_head *head); int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, bool, struct se_device *); void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); diff --git a/drivers/target/target_core_sbc.c 
b/drivers/target/target_core_sbc.c index fe8beb7dbab1..abe91dc8722e 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) return 0; } +static sense_reason_t +sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) +{ + struct se_dev_attrib *attrib = &dev->dev_attrib; + u16 boundary, transfer_len; + u64 lba; + + lba = transport_lba_64(cdb); + boundary = get_unaligned_be16(&cdb[10]); + transfer_len = get_unaligned_be16(&cdb[12]); + + if (!attrib->atomic_max_len) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (boundary) { + if (transfer_len > attrib->atomic_max_with_boundary) + return TCM_INVALID_CDB_FIELD; + + if (boundary > attrib->atomic_max_boundary) + return TCM_INVALID_CDB_FIELD; + } else { + if (transfer_len > attrib->atomic_max_len) + return TCM_INVALID_CDB_FIELD; + } + + if (attrib->atomic_granularity) { + if (transfer_len % attrib->atomic_granularity) + return TCM_INVALID_CDB_FIELD; + + if (boundary && boundary % attrib->atomic_granularity) + return TCM_INVALID_CDB_FIELD; + } + + if (dev->dev_attrib.atomic_alignment) { + u64 _lba = lba; + + if (do_div(_lba, dev->dev_attrib.atomic_alignment)) + return TCM_INVALID_CDB_FIELD; + } + + return 0; +} + sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) { @@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) break; case WRITE_16: case WRITE_VERIFY_16: + case WRITE_ATOMIC_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); @@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) return ret; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + if (cdb[0] == WRITE_ATOMIC_16) { + cmd->se_cmd_flags |= SCF_ATOMIC; + + ret = sbc_check_atomic(dev, cmd, cdb); + if (ret) + return ret; + } cmd->execute_cmd = sbc_execute_rw; break; case VARIABLE_LENGTH_CMD: diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index aad0096afa21..fe2b888bcb43 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) have_tp = 1; buf[0] = dev->transport->get_device_type(dev); - buf[3] = have_tp ? 0x3c : 0x10; /* Set WSNZ to 1 */ buf[4] = 0x01; @@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) else put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); - /* - * Exit now if we don't support TP. 
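sbc_check_atomic() above validates a WRITE ATOMIC (16) CDB against the advertised limits: the LBA sits in bytes 2-9, the atomic boundary in bytes 10-11 and the transfer length in bytes 12-13. A small illustrative sketch of how an initiator-side tool might pack such a CDB; the 0x9C opcode value comes from SBC and is not spelled out in the diff, so treat it as an assumption:

#include <stdint.h>
#include <string.h>

#define WRITE_ATOMIC_16_OP	0x9C	/* SBC WRITE ATOMIC (16) opcode (assumed) */

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v; }
static void put_be64(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = v >> (8 * (7 - i));
}

/* Build a 16-byte WRITE ATOMIC (16) CDB with the field layout checked above. */
static void build_write_atomic16(uint8_t cdb[16], uint64_t lba,
				 uint16_t boundary, uint16_t transfer_len)
{
	memset(cdb, 0, 16);
	cdb[0] = WRITE_ATOMIC_16_OP;
	put_be64(&cdb[2], lba);			/* LOGICAL BLOCK ADDRESS, bytes 2-9   */
	put_be16(&cdb[10], boundary);		/* ATOMIC BOUNDARY, bytes 10-11       */
	put_be16(&cdb[12], transfer_len);	/* TRANSFER LENGTH, bytes 12-13       */
}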
- */ + put_unaligned_be16(12, &buf[2]); + if (!have_tp) - goto max_write_same; + goto try_atomic; /* * Set MAXIMUM UNMAP LBA COUNT @@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * MAXIMUM WRITE SAME LENGTH */ -max_write_same: put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]); + put_unaligned_be16(40, &buf[2]); + +try_atomic: + /* + * ATOMIC + */ + if (!dev->dev_attrib.atomic_max_len) + goto done; + + if (dev->dev_attrib.atomic_max_len < io_max_blocks) + put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]); + else + put_unaligned_be32(io_max_blocks, &buf[44]); + + put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]); + put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]); + put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]); + put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]); + + put_unaligned_be16(60, &buf[2]); +done: return 0; } @@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = { .update_usage_bits = set_dpofua_usage_bits32, }; +static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + return cmd->se_dev->dev_attrib.atomic_max_len; +} + +static struct target_opcode_descriptor tcm_opcode_write_atomic16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_ATOMIC_16, + .cdb_size = 16, + .usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .enabled = tcm_is_atomic_enabled, + .update_usage_bits = set_dpofua_usage_bits, +}; + static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { @@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = { &tcm_opcode_write16, &tcm_opcode_write_verify16, &tcm_opcode_write_same32, + &tcm_opcode_write_atomic16, &tcm_opcode_compare_write, &tcm_opcode_read_capacity, &tcm_opcode_read_capacity16, diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 6bdf2d8bd694..083205052be2 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -276,56 +276,39 @@ static ssize_t target_stat_lu_state_bit_show(struct config_item *item, return snprintf(page, PAGE_SIZE, "exposed\n"); } -static ssize_t target_stat_lu_num_cmds_show(struct config_item *item, - char *page) -{ - struct se_device *dev = to_stat_lu_dev(item); - struct se_dev_io_stats *stats; - unsigned int cpu; - u32 cmds = 0; - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(dev->stats, cpu); - cmds += stats->total_cmds; - } - - /* scsiLuNumCommands */ - return snprintf(page, PAGE_SIZE, "%u\n", cmds); -} - -static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item, - char *page) -{ - struct se_device *dev = to_stat_lu_dev(item); - struct se_dev_io_stats *stats; - unsigned int cpu; - u32 bytes = 0; - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(dev->stats, cpu); - bytes += stats->read_bytes; - } - - /* scsiLuReadMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); -} - -static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item, - char *page) -{ - struct se_device *dev = to_stat_lu_dev(item); - struct se_dev_io_stats *stats; - unsigned int cpu; - u32 bytes = 0; - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(dev->stats, cpu); - bytes += stats->write_bytes; - } - - /* scsiLuWrittenMegaBytes */ - return 
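spc_emulate_evpd_b0() above now publishes the atomic limits in the Block Limits VPD page (0xB0): maximum atomic transfer length at byte 44, alignment at 48, granularity at 52, the with-boundary maximum at 56 and the maximum boundary at 60, with the page length in bytes 2-3 raised to 60 when atomic writes are supported. A hedged initiator-side decode sketch over a raw VPD buffer:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | p[3];
}

/* Decode the atomic-write fields from a Block Limits VPD (page 0xB0) buffer. */
static void dump_atomic_limits(const uint8_t *vpd, size_t len)
{
	uint16_t page_len = ((uint16_t)vpd[2] << 8) | vpd[3];

	if (len < 64 || page_len < 60) {
		printf("no atomic write limits reported\n");
		return;
	}
	printf("max atomic transfer length:            %u\n", get_be32(&vpd[44]));
	printf("atomic alignment:                      %u\n", get_be32(&vpd[48]));
	printf("atomic transfer length granularity:    %u\n", get_be32(&vpd[52]));
	printf("max atomic length with boundary:       %u\n", get_be32(&vpd[56]));
	printf("max atomic boundary size:              %u\n", get_be32(&vpd[60]));
}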
snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); -} +#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift) \ +static ssize_t \ +per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats, \ + char *page) \ +{ \ + struct stats_struct *stats; \ + unsigned int cpu; \ + u64 sum = 0; \ + \ + for_each_possible_cpu(cpu) { \ + stats = per_cpu_ptr(per_cpu_stats, cpu); \ + sum += stats->field; \ + } \ + \ + return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \ +} + +#define lu_show_per_cpu_stat(prefix, field, shift) \ +per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift); \ +static ssize_t \ +target_stat_##prefix##_show(struct config_item *item, char *page) \ +{ \ + struct se_device *dev = to_stat_lu_dev(item); \ + \ + return per_cpu_stat_##prefix##_snprintf(dev->stats, page); \ +} \ + +/* scsiLuNumCommands */ +lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0); +/* scsiLuReadMegaBytes */ +lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20); +/* scsiLuWrittenMegaBytes */ +lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20); static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page) { @@ -623,53 +606,30 @@ static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item, return ret; } -static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item, - char *page) -{ - struct se_lun *lun = to_stat_tgt_port(item); - struct se_device *dev; - ssize_t ret = -ENODEV; - - rcu_read_lock(); - dev = rcu_dereference(lun->lun_se_dev); - if (dev) - ret = snprintf(page, PAGE_SIZE, "%lu\n", - atomic_long_read(&lun->lun_stats.cmd_pdus)); - rcu_read_unlock(); - return ret; -} - -static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item, - char *page) -{ - struct se_lun *lun = to_stat_tgt_port(item); - struct se_device *dev; - ssize_t ret = -ENODEV; - - rcu_read_lock(); - dev = rcu_dereference(lun->lun_se_dev); - if (dev) - ret = snprintf(page, PAGE_SIZE, "%u\n", - (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20)); - rcu_read_unlock(); - return ret; -} - -static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item, - char *page) -{ - struct se_lun *lun = to_stat_tgt_port(item); - struct se_device *dev; - ssize_t ret = -ENODEV; - - rcu_read_lock(); - dev = rcu_dereference(lun->lun_se_dev); - if (dev) - ret = snprintf(page, PAGE_SIZE, "%u\n", - (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20)); - rcu_read_unlock(); - return ret; -} +#define tgt_port_show_per_cpu_stat(prefix, field, shift) \ +per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift); \ +static ssize_t \ +target_stat_##prefix##_show(struct config_item *item, char *page) \ +{ \ + struct se_lun *lun = to_stat_tgt_port(item); \ + struct se_device *dev; \ + int ret; \ + \ + rcu_read_lock(); \ + dev = rcu_dereference(lun->lun_se_dev); \ + if (!dev) { \ + rcu_read_unlock(); \ + return -ENODEV; \ + } \ + \ + ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page); \ + rcu_read_unlock(); \ + return ret; \ +} + +tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0); +tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20); +tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20); static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item, char *page) @@ -1035,92 +995,34 @@ static ssize_t target_stat_auth_att_count_show(struct config_item *item, return ret; } -static ssize_t target_stat_auth_num_cmds_show(struct config_item *item, - char *page) -{ - struct 
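The per_cpu_stat_snprintf()/lu_show_per_cpu_stat() helpers above fold three nearly identical show functions into a macro that sums a per-CPU counter and optionally right-shifts it (>> 20 for MiB). For readability, a hand expansion of lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20) comes out to roughly the following (derived mechanically from the macros, not additional source):

static ssize_t
per_cpu_stat_lu_read_mbytes_snprintf(struct se_dev_io_stats __percpu *per_cpu_stats,
				     char *page)
{
	struct se_dev_io_stats *stats;
	unsigned int cpu;
	u64 sum = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(per_cpu_stats, cpu);
		sum += stats->read_bytes;
	}

	return snprintf(page, PAGE_SIZE, "%llu\n", sum >> 20);
}

static ssize_t
target_stat_lu_read_mbytes_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_stat_lu_dev(item);

	return per_cpu_stat_lu_read_mbytes_snprintf(dev->stats, page);
}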
se_lun_acl *lacl = auth_to_lacl(item); - struct se_node_acl *nacl = lacl->se_lun_nacl; - struct se_dev_entry_io_stats *stats; - struct se_dev_entry *deve; - unsigned int cpu; - ssize_t ret; - u32 cmds = 0; - - rcu_read_lock(); - deve = target_nacl_find_deve(nacl, lacl->mapped_lun); - if (!deve) { - rcu_read_unlock(); - return -ENODEV; - } - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(deve->stats, cpu); - cmds += stats->total_cmds; - } - - /* scsiAuthIntrOutCommands */ - ret = snprintf(page, PAGE_SIZE, "%u\n", cmds); - rcu_read_unlock(); - return ret; -} - -static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item, - char *page) -{ - struct se_lun_acl *lacl = auth_to_lacl(item); - struct se_node_acl *nacl = lacl->se_lun_nacl; - struct se_dev_entry_io_stats *stats; - struct se_dev_entry *deve; - unsigned int cpu; - ssize_t ret; - u32 bytes = 0; - - rcu_read_lock(); - deve = target_nacl_find_deve(nacl, lacl->mapped_lun); - if (!deve) { - rcu_read_unlock(); - return -ENODEV; - } - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(deve->stats, cpu); - bytes += stats->read_bytes; - } - - /* scsiAuthIntrReadMegaBytes */ - ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); - rcu_read_unlock(); - return ret; -} - -static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item, - char *page) -{ - struct se_lun_acl *lacl = auth_to_lacl(item); - struct se_node_acl *nacl = lacl->se_lun_nacl; - struct se_dev_entry_io_stats *stats; - struct se_dev_entry *deve; - unsigned int cpu; - ssize_t ret; - u32 bytes = 0; - - rcu_read_lock(); - deve = target_nacl_find_deve(nacl, lacl->mapped_lun); - if (!deve) { - rcu_read_unlock(); - return -ENODEV; - } - - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(deve->stats, cpu); - bytes += stats->write_bytes; - } - - /* scsiAuthIntrWrittenMegaBytes */ - ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); - rcu_read_unlock(); - return ret; -} +#define auth_show_per_cpu_stat(prefix, field, shift) \ +per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift); \ +static ssize_t \ +target_stat_##prefix##_show(struct config_item *item, char *page) \ +{ \ + struct se_lun_acl *lacl = auth_to_lacl(item); \ + struct se_node_acl *nacl = lacl->se_lun_nacl; \ + struct se_dev_entry *deve; \ + int ret; \ + \ + rcu_read_lock(); \ + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); \ + if (!deve) { \ + rcu_read_unlock(); \ + return -ENODEV; \ + } \ + \ + ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page); \ + rcu_read_unlock(); \ + return ret; \ +} + +/* scsiAuthIntrOutCommands */ +auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0); +/* scsiAuthIntrReadMegaBytes */ +auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20); +/* scsiAuthIntrWrittenMegaBytes */ +auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20); static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item, char *page) diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index c0e429e5ef31..8b5ad50baa43 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -548,7 +548,7 @@ int core_tpg_register( ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, true, g_lun0_dev); if (ret < 0) { - kfree(se_tpg->tpg_virt_lun0); + target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head); return ret; } } @@ -595,7 +595,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) if (se_tpg->proto_id >= 0) { core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); - kfree_rcu(se_tpg->tpg_virt_lun0, 
rcu_head); + call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun); } target_tpg_deregister_rtpi(se_tpg); @@ -615,6 +615,13 @@ struct se_lun *core_tpg_alloc_lun( pr_err("Unable to allocate se_lun memory\n"); return ERR_PTR(-ENOMEM); } + + lun->lun_stats = alloc_percpu(struct scsi_port_stats); + if (!lun->lun_stats) { + pr_err("Unable to allocate se_lun stats memory\n"); + goto free_lun; + } + lun->unpacked_lun = unpacked_lun; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); @@ -628,6 +635,18 @@ struct se_lun *core_tpg_alloc_lun( lun->lun_tpg = tpg; return lun; + +free_lun: + kfree(lun); + return ERR_PTR(-ENOMEM); +} + +void target_tpg_free_lun(struct rcu_head *head) +{ + struct se_lun *lun = container_of(head, struct se_lun, rcu_head); + + free_percpu(lun->lun_stats); + kfree(lun); } int core_tpg_add_lun( diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0a76bdfe5528..fca9b44288bc 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1571,7 +1571,12 @@ target_cmd_parse_cdb(struct se_cmd *cmd) return ret; cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); + /* + * If this is the xcopy_lun then we won't have lun_stats since we + * can't export them. + */ + if (cmd->se_lun->lun_stats) + this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus); return 0; } EXPORT_SYMBOL(target_cmd_parse_cdb); @@ -2597,8 +2602,9 @@ queue_rsp: !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) goto queue_status; - atomic_long_add(cmd->data_length, - &cmd->se_lun->lun_stats.tx_data_octets); + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets, + cmd->data_length); /* * Perform READ_STRIP of PI using software emulation when * backend had PI enabled, if the transport will not be @@ -2621,14 +2627,16 @@ queue_rsp: goto queue_full; break; case DMA_TO_DEVICE: - atomic_long_add(cmd->data_length, - &cmd->se_lun->lun_stats.rx_data_octets); + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets, + cmd->data_length); /* * Check if we need to send READ payload for BIDI-COMMAND */ if (cmd->se_cmd_flags & SCF_BIDI) { - atomic_long_add(cmd->data_length, - &cmd->se_lun->lun_stats.tx_data_octets); + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets, + cmd->data_length); ret = cmd->se_tfo->queue_data_in(cmd); if (ret) goto queue_full; diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig index 191fbd799ec5..7d5117b2dab4 100644 --- a/drivers/ufs/host/Kconfig +++ b/drivers/ufs/host/Kconfig @@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP Select this if you have UFS controller on Rockchip chipset. If unsure, say N. + +config SCSI_UFS_AMD_VERSAL2 + tristate "AMD Versal Gen 2 UFS controller platform driver" + depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST) + help + This selects the AMD Versal Gen 2 specific additions on top of + the UFSHCD DWC and UFSHCD platform driver. UFS host on AMD + Versal Gen 2 needs some vendor specific configurations like PHY + and vendor specific register accesses before accessing the + hardware. + + Select this if you have UFS controller on AMD Versal Gen 2 SoC. + If unsure, say N. 
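The core_tpg changes above switch from kfree_rcu() to call_rcu() with target_tpg_free_lun() because an se_lun now owns a per-CPU stats allocation that kfree_rcu() would leak: the callback has to release the percpu region before freeing the object. A minimal kernel-style sketch of the pattern, using hypothetical names:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	u64 __percpu	*stats;		/* allocated with alloc_percpu() */
	struct rcu_head	rcu_head;
};

static void demo_obj_free_rcu(struct rcu_head *head)
{
	struct demo_obj *obj = container_of(head, struct demo_obj, rcu_head);

	free_percpu(obj->stats);	/* kfree_rcu() alone could not do this step */
	kfree(obj);
}

/* On teardown, after the object is unpublished from RCU-protected lists: */
static void demo_obj_destroy(struct demo_obj *obj)
{
	call_rcu(&obj->rcu_head, demo_obj_free_rcu);
}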
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile index 2f97feb5db3f..65d8bb23ab7b 100644 --- a/drivers/ufs/host/Makefile +++ b/drivers/ufs/host/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o +obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c index 21214e5d5896..43781593b5c1 100644 --- a/drivers/ufs/host/ti-j721e-ufs.c +++ b/drivers/ufs/host/ti-j721e-ufs.c @@ -15,18 +15,26 @@ #define TI_UFS_SS_RST_N_PCS BIT(0) #define TI_UFS_SS_CLK_26MHZ BIT(4) +struct ti_j721e_ufs { + void __iomem *regbase; + u32 reg; +}; + static int ti_j721e_ufs_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct ti_j721e_ufs *ufs; unsigned long clk_rate; - void __iomem *regbase; struct clk *clk; - u32 reg = 0; int ret; - regbase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(regbase)) - return PTR_ERR(regbase); + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + ufs->regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ufs->regbase)) + return PTR_ERR(ufs->regbase); pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); @@ -42,12 +50,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev) } clk_rate = clk_get_rate(clk); if (clk_rate == 26000000) - reg |= TI_UFS_SS_CLK_26MHZ; + ufs->reg |= TI_UFS_SS_CLK_26MHZ; devm_clk_put(dev, clk); /* Take UFS slave device out of reset */ - reg |= TI_UFS_SS_RST_N_PCS; - writel(reg, regbase + TI_UFS_SS_CTRL); + ufs->reg |= TI_UFS_SS_RST_N_PCS; + writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL); + + dev_set_drvdata(dev, ufs); ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev); @@ -72,6 +82,16 @@ static void ti_j721e_ufs_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } +static int ti_j721e_ufs_resume(struct device *dev) +{ + struct ti_j721e_ufs *ufs = dev_get_drvdata(dev); + + writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL); + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(ti_j721e_ufs_pm_ops, NULL, ti_j721e_ufs_resume); + static const struct of_device_id ti_j721e_ufs_of_match[] = { { .compatible = "ti,j721e-ufs", @@ -87,6 +107,7 @@ static struct platform_driver ti_j721e_ufs_driver = { .driver = { .name = "ti-j721e-ufs", .of_match_table = ti_j721e_ufs_of_match, + .pm = pm_sleep_ptr(&ti_j721e_ufs_pm_ops), }, }; module_platform_driver(ti_j721e_ufs_driver); diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c new file mode 100644 index 000000000000..40543db621a1 --- /dev/null +++ b/drivers/ufs/host/ufs-amd-versal2.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. 
+ * + * Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/irqreturn.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <ufs/unipro.h> + +#include "ufshcd-dwc.h" +#include "ufshcd-pltfrm.h" +#include "ufshci-dwc.h" + +/* PHY modes */ +#define UFSHCD_DWC_PHY_MODE_ROM 0 + +#define MPHY_FAST_RX_AFE_CAL BIT(2) +#define MPHY_FW_CALIB_CFG_VAL BIT(8) + +#define MPHY_RX_OVRD_EN BIT(3) +#define MPHY_RX_OVRD_VAL BIT(2) +#define MPHY_RX_ACK_MASK BIT(0) + +#define TIMEOUT_MICROSEC 1000000 + +struct ufs_versal2_host { + struct ufs_hba *hba; + struct reset_control *rstc; + struct reset_control *rstphy; + u32 phy_mode; + unsigned long host_clk; + u8 attcompval0; + u8 attcompval1; + u8 ctlecompval0; + u8 ctlecompval1; +}; + +static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val) +{ + static struct ufshcd_dme_attr_val phy_write_attrs[] = { + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + phy_write_attrs[0].mib_val = (u8)addr; + phy_write_attrs[1].mib_val = (u8)(addr >> 8); + phy_write_attrs[2].mib_val = (u8)val; + phy_write_attrs[3].mib_val = (u8)(val >> 8); + + return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs)); +} + +static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val) +{ + u32 mib_val; + int ret; + static struct ufshcd_dme_attr_val phy_read_attrs[] = { + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, + { UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + phy_read_attrs[0].mib_val = (u8)addr; + phy_read_attrs[1].mib_val = (u8)(addr >> 8); + + ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs)); + if (ret) + return ret; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val); + if (ret) + return ret; + + *val = mib_val; + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val); + if (ret) + return ret; + + *val |= (mib_val << 8); + + return 0; +} + +static int ufs_versal2_enable_phy(struct ufs_hba *hba) +{ + u32 offset, reg; + int ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0); + if (ret) + return ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1); + if (ret) + return ret; + + /* Check Tx/Rx FSM states */ + for (offset = 0; offset < 2; offset++) { + u32 time_left, mibsel; + + time_left = TIMEOUT_MICROSEC; + mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset)); + do { + ret = ufshcd_dme_get(hba, mibsel, ®); + if (ret) + return ret; + + if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP || + reg == TX_STATE_LSBURST) + break; + + time_left--; + usleep_range(1, 5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Invalid Tx FSM state.\n"); + return -ETIMEDOUT; + } + + time_left = TIMEOUT_MICROSEC; + mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset)); + do { + ret = ufshcd_dme_get(hba, mibsel, ®); + if (ret) + return ret; + + if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP || + reg == RX_STATE_LSBURST) + break; + + time_left--; + usleep_range(1, 5); 
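ufs_versal2_phy_reg_write()/_read() above tunnel 16-bit M-PHY register addresses and values through 8-bit CBCREG* DME attributes, so each 16-bit quantity is split into LSB/MSB halves on write and recombined from CBCREGRDLSB/CBCREGRDMSB on read. A tiny stand-alone illustration of that packing (plain C, not driver code):

#include <assert.h>
#include <stdint.h>

struct cbcreg_halves {
	uint8_t lsb;
	uint8_t msb;
};

/* Split a 16-bit value into the two 8-bit attribute payloads. */
static struct cbcreg_halves cbcreg_split(uint16_t v)
{
	return (struct cbcreg_halves){ .lsb = (uint8_t)v, .msb = (uint8_t)(v >> 8) };
}

/* Recombine on the read side, as the driver does after the two ufshcd_dme_get() calls. */
static uint16_t cbcreg_join(uint8_t lsb, uint8_t msb)
{
	return (uint16_t)lsb | ((uint16_t)msb << 8);
}

int main(void)
{
	struct cbcreg_halves h = cbcreg_split(0x401C);	/* e.g. FAST_FLAGS(0) */

	assert(cbcreg_join(h.lsb, h.msb) == 0x401C);
	return 0;
}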
+ } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Invalid Rx FSM state.\n"); + return -ETIMEDOUT; + } + } + + return 0; +} + +static int ufs_versal2_setup_phy(struct ufs_hba *hba) +{ + struct ufs_versal2_host *host = ufshcd_get_variant(hba); + int ret; + u32 reg; + + /* Bypass RX-AFE offset calibrations (ATT/CTLE) */ + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), ®); + if (ret) + return ret; + + reg |= MPHY_FAST_RX_AFE_CAL; + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg); + if (ret) + return ret; + + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), ®); + if (ret) + return ret; + + reg |= MPHY_FAST_RX_AFE_CAL; + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg); + if (ret) + return ret; + + /* Program ATT and CTLE compensation values */ + if (host->attcompval0) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0); + if (ret) + return ret; + } + + if (host->attcompval1) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1); + if (ret) + return ret; + } + + if (host->ctlecompval0) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0); + if (ret) + return ret; + } + + if (host->ctlecompval1) { + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1); + if (ret) + return ret; + } + + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), ®); + if (ret) + return ret; + + reg |= MPHY_FW_CALIB_CFG_VAL; + ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg); + if (ret) + return ret; + + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), ®); + if (ret) + return ret; + + reg |= MPHY_FW_CALIB_CFG_VAL; + return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg); +} + +static int ufs_versal2_phy_init(struct ufs_hba *hba) +{ + struct ufs_versal2_host *host = ufshcd_get_variant(hba); + u32 time_left; + bool is_ready; + int ret; + static const struct ufshcd_dme_attr_val rmmi_attrs[] = { + { UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL }, + { UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL }, + { UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL }, + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } + }; + + /* Wait for Tx/Rx config_rdy */ + time_left = TIMEOUT_MICROSEC; + do { + time_left--; + ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready); + if (ret) + return ret; + + if (!is_ready) + break; + + usleep_range(1, 5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Tx/Rx configuration signal busy.\n"); + return -ETIMEDOUT; + } + + ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs)); + if (ret) + return ret; + + ret = reset_control_deassert(host->rstphy); + if (ret) { + dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret); + return ret; + } + + /* Wait for SRAM init done */ + time_left = TIMEOUT_MICROSEC; + do { + time_left--; + ret = zynqmp_pm_is_sram_init_done(&is_ready); + if (ret) + return ret; + + if (is_ready) + break; + + usleep_range(1, 5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "SRAM initialization failed.\n"); + return -ETIMEDOUT; + } + + ret = ufs_versal2_setup_phy(hba); + if (ret) + return ret; + + return ufs_versal2_enable_phy(hba); +} + +static int ufs_versal2_init(struct ufs_hba *hba) +{ + struct ufs_versal2_host *host; + struct device *dev = hba->dev; + struct ufs_clk_info *clki; + int ret; + u32 cal; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->hba = hba; + ufshcd_set_variant(hba, host); + + host->phy_mode 
= UFSHCD_DWC_PHY_MODE_ROM; + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core")) + host->host_clk = clk_get_rate(clki->clk); + } + + host->rstc = devm_reset_control_get_exclusive(dev, "host"); + if (IS_ERR(host->rstc)) { + dev_err(dev, "failed to get reset ctrl: host\n"); + return PTR_ERR(host->rstc); + } + + host->rstphy = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(host->rstphy)) { + dev_err(dev, "failed to get reset ctrl: phy\n"); + return PTR_ERR(host->rstphy); + } + + ret = reset_control_assert(host->rstc); + if (ret) { + dev_err(hba->dev, "host reset assert failed, err = %d\n", ret); + return ret; + } + + ret = reset_control_assert(host->rstphy); + if (ret) { + dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret); + return ret; + } + + ret = zynqmp_pm_set_sram_bypass(); + if (ret) { + dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret); + return ret; + } + + ret = reset_control_deassert(host->rstc); + if (ret) + dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret); + + ret = zynqmp_pm_get_ufs_calibration_values(&cal); + if (ret) { + dev_err(dev, "failed to read calibration values\n"); + return ret; + } + + host->attcompval0 = (u8)cal; + host->attcompval1 = (u8)(cal >> 8); + host->ctlecompval0 = (u8)(cal >> 16); + host->ctlecompval1 = (u8)(cal >> 24); + + hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING; + + return 0; +} + +static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int ret = 0; + + if (status == PRE_CHANGE) { + ret = ufs_versal2_phy_init(hba); + if (ret) + dev_err(hba->dev, "Phy init failed (%d)\n", ret); + } + + return ret; +} + +static int ufs_versal2_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_versal2_host *host = ufshcd_get_variant(hba); + int ret = 0; + + switch (status) { + case PRE_CHANGE: + if (host->host_clk) + ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV); + + break; + case POST_CHANGE: + ret = ufshcd_dwc_link_startup_notify(hba, status); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req) +{ + u32 time_left, reg, lane; + int ret; + + for (lane = 0; lane < activelanes; lane++) { + time_left = TIMEOUT_MICROSEC; + ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), ®); + if (ret) + return ret; + + reg |= MPHY_RX_OVRD_EN; + if (rx_req) + reg |= MPHY_RX_OVRD_VAL; + else + reg &= ~MPHY_RX_OVRD_VAL; + + ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg); + if (ret) + return ret; + + do { + ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), ®); + if (ret) + return ret; + + reg &= MPHY_RX_ACK_MASK; + if (reg == rx_req) + break; + + time_left--; + usleep_range(1, 5); + } while (time_left); + + if (!time_left) { + dev_err(hba->dev, "Invalid Rx Ack value.\n"); + return -ETIMEDOUT; + } + } + + return 0; +} + +static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_versal2_host *host = ufshcd_get_variant(hba); + u32 lane, reg, rate = 0; + int ret = 0; + + if (status == PRE_CHANGE) { + memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr)); + + /* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */ + if (!host->attcompval0 && !host->attcompval1 && 
!host->ctlecompval0 && + !host->ctlecompval1) { + dev_req_params->pwr_rx = SLOW_MODE; + dev_req_params->pwr_tx = SLOW_MODE; + return 0; + } + + if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE) + return 0; + + if (dev_req_params->hs_rate == PA_HS_MODE_B) + rate = 1; + + /* Select the rate */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate); + if (ret) + return ret; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1); + if (ret) + return ret; + + ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1); + if (ret) + return ret; + + ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0); + if (ret) + return ret; + + /* Remove rx_req override */ + for (lane = 0; lane < dev_req_params->lane_tx; lane++) { + ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), ®); + if (ret) + return ret; + + reg &= ~MPHY_RX_OVRD_EN; + ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg); + if (ret) + return ret; + } + + if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2) + ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } + + return ret; +} + +static struct ufs_hba_variant_ops ufs_versal2_hba_vops = { + .name = "ufs-versal2-pltfm", + .init = ufs_versal2_init, + .link_startup_notify = ufs_versal2_link_startup_notify, + .hce_enable_notify = ufs_versal2_hce_enable_notify, + .pwr_change_notify = ufs_versal2_pwr_change_notify, +}; + +static const struct of_device_id ufs_versal2_pltfm_match[] = { + { + .compatible = "amd,versal2-ufs", + .data = &ufs_versal2_hba_vops, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match); + +static int ufs_versal2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + /* Perform generic probe */ + ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops); + if (ret) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret); + + return ret; +} + +static void ufs_versal2_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); +} + +static const struct dev_pm_ops ufs_versal2_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) +}; + +static struct platform_driver ufs_versal2_pltfm = { + .probe = ufs_versal2_probe, + .remove = ufs_versal2_remove, + .driver = { + .name = "ufshcd-versal2", + .pm = &ufs_versal2_pm_ops, + .of_match_table = of_match_ptr(ufs_versal2_pltfm_match), + }, +}; + +module_platform_driver(ufs_versal2_pltfm); + +MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>"); +MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h index ad91ea56662c..c618bb914904 100644 --- a/drivers/ufs/host/ufshcd-dwc.h +++ b/drivers/ufs/host/ufshcd-dwc.h @@ -12,6 +12,52 @@ #include <ufs/ufshcd.h> +/* RMMI Attributes */ +#define CBREFCLKCTRL2 0x8132 +#define CBCRCTRL 0x811F +#define CBC10DIRECTCONF2 0x810E +#define CBRATESEL 0x8114 +#define CBCREGADDRLSB 0x8116 +#define CBCREGADDRMSB 0x8117 +#define CBCREGWRLSB 0x8118 +#define CBCREGWRMSB 0x8119 +#define CBCREGRDLSB 0x811A +#define CBCREGRDMSB 0x811B +#define CBCREGRDWRSEL 0x811C + +#define CBREFREFCLK_GATE_OVR_EN BIT(7) + +/* M-PHY Attributes */ +#define MTX_FSM_STATE 0x41 +#define MRX_FSM_STATE 0xC1 + +/* M-PHY registers */ +#define 
RX_OVRD_IN_1(n) (0x3006 + ((n) * 0x100)) +#define RX_PCS_OUT(n) (0x300F + ((n) * 0x100)) +#define FAST_FLAGS(n) (0x401C + ((n) * 0x100)) +#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100)) +#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100)) +#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100)) + +/* Tx/Rx FSM state */ +enum rx_fsm_state { + RX_STATE_DISABLED = 0, + RX_STATE_HIBERN8 = 1, + RX_STATE_SLEEP = 2, + RX_STATE_STALL = 3, + RX_STATE_LSBURST = 4, + RX_STATE_HSBURST = 5, +}; + +enum tx_fsm_state { + TX_STATE_DISABLED = 0, + TX_STATE_HIBERN8 = 1, + TX_STATE_SLEEP = 2, + TX_STATE_STALL = 3, + TX_STATE_LSBURST = 4, + TX_STATE_HSBURST = 5, +}; + struct ufshcd_dme_attr_val { u32 attr_sel; u32 mib_val; diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h new file mode 100644 index 000000000000..d3538dd5822a --- /dev/null +++ b/include/linux/firmware/xlnx-zynqmp-ufs.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Firmware layer for UFS APIs. + * + * Copyright (c) 2025 Advanced Micro Devices, Inc. + */ + +#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__ +#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__ + +#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) +int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready); +int zynqmp_pm_is_sram_init_done(bool *is_done); +int zynqmp_pm_set_sram_bypass(void); +int zynqmp_pm_get_ufs_calibration_values(u32 *val); +#else +static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_is_sram_init_done(bool *is_done) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_sram_bypass(void) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val) +{ + return -ENODEV; +} +#endif + +#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index ae48d619c4e0..784d5920b4cd 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/err.h> +#include <linux/firmware/xlnx-zynqmp-ufs.h> #define ZYNQMP_PM_VERSION_MAJOR 1 #define ZYNQMP_PM_VERSION_MINOR 0 @@ -241,6 +242,7 @@ enum pm_ioctl_id { IOCTL_GET_FEATURE_CONFIG = 27, /* IOCTL for Secure Read/Write Interface */ IOCTL_READ_REG = 28, + IOCTL_MASK_WRITE_REG = 29, /* Dynamic SD/GEM configuration */ IOCTL_SET_SD_CONFIG = 30, IOCTL_SET_GEM_CONFIG = 31, @@ -619,6 +621,9 @@ int zynqmp_pm_feature(const u32 api_id); int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); +int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value); +int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, + u32 mask, u32 value); int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); int zynqmp_pm_force_pwrdwn(const u32 target, const enum zynqmp_pm_request_ack ack); @@ -916,6 +921,17 @@ static inline int zynqmp_pm_request_wake(const u32 node, return -ENODEV; } +static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, + u32 mask, u32 value) +{ + return -ENODEV; +} + static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) { return -ENODEV; diff --git a/include/scsi/scsi_device.h 
b/include/scsi/scsi_device.h index 1e2e599517e9..d62265d12cfe 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -179,6 +179,12 @@ struct scsi_device { unsigned manage_shutdown:1; /* + * If true, let the high-level device driver (sd) manage the device + * power state for system restart (reboot) operations. + */ + unsigned manage_restart:1; + + /* * If set and if the device is runtime suspended, ask the high-level * device driver (sd) to force a runtime resume of the device. */ diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 4063a701081b..e32de80854b6 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -121,8 +121,10 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, bool target_sense_desc_format(struct se_device *dev); sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); -bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, - struct block_device *bdev); +bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib, + struct block_device *bdev); +void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib, + struct block_device *bdev); static inline bool target_dev_configured(struct se_device *se_dev) { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c4d9116904aa..7016d93fa383 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -158,6 +158,7 @@ enum se_cmd_flags_table { SCF_TASK_ATTR_SET = (1 << 17), SCF_TREAT_READ_AS_NORMAL = (1 << 18), SCF_TASK_ORDERED_SYNC = (1 << 19), + SCF_ATOMIC = (1 << 20), }; /* @@ -671,9 +672,9 @@ struct se_lun_acl { }; struct se_dev_entry_io_stats { - u32 total_cmds; - u32 read_bytes; - u32 write_bytes; + u64 total_cmds; + u64 read_bytes; + u64 write_bytes; }; struct se_dev_entry { @@ -731,6 +732,11 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u32 atomic_max_len; + u32 atomic_alignment; + u32 atomic_granularity; + u32 atomic_max_with_boundary; + u32 atomic_max_boundary; u8 submit_type; struct se_device *da_dev; struct config_group da_group; @@ -744,9 +750,9 @@ struct se_port_stat_grps { }; struct scsi_port_stats { - atomic_long_t cmd_pdus; - atomic_long_t tx_data_octets; - atomic_long_t rx_data_octets; + u64 cmd_pdus; + u64 tx_data_octets; + u64 rx_data_octets; }; struct se_lun { @@ -773,7 +779,7 @@ struct se_lun { spinlock_t lun_tg_pt_gp_lock; struct se_portal_group *lun_tpg; - struct scsi_port_stats lun_stats; + struct scsi_port_stats __percpu *lun_stats; struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_shutdown_comp; @@ -806,9 +812,9 @@ struct se_device_queue { }; struct se_dev_io_stats { - u32 total_cmds; - u32 read_bytes; - u32 write_bytes; + u64 total_cmds; + u64 read_bytes; + u64 write_bytes; }; struct se_device { diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 498ec9028b3c..59de737490ca 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -179,6 +179,7 @@ #define VS_POWERSTATE 0xD083 #define VS_MPHYCFGUPDT 0xD085 #define VS_DEBUGOMC 0xD09E +#define VS_MPHYDISABLE 0xD0C1 #define PA_GRANULARITY_MIN_VAL 1 #define PA_GRANULARITY_MAX_VAL 6 |
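The final hunks above widen the per-device, per-LUN and per-ACL I/O counters from u32/atomic_long_t to per-CPU u64. One practical effect worth noting is that megabyte figures derived from a 32-bit byte count stop wrapping at 4 GiB; a quick stand-alone illustration of the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 5 GiB of I/O accounted in a 32-bit vs a 64-bit byte counter. */
	uint64_t io_bytes = 5ULL << 30;
	uint32_t old_ctr = (uint32_t)io_bytes;	/* wraps at 4 GiB */
	uint64_t new_ctr = io_bytes;

	printf("u32 counter reports %" PRIu32 " MiB\n", old_ctr >> 20);	/* 1024 */
	printf("u64 counter reports %" PRIu64 " MiB\n", new_ctr >> 20);	/* 5120 */
	return 0;
}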