author     Jiri Kosina <jkosina@suse.com>  2025-12-02 14:52:36 +0100
committer  Jiri Kosina <jkosina@suse.com>  2025-12-02 14:52:36 +0100
commit     1a2f09ae11b4657d49df6fa86744795f99c2ee27 (patch)
tree       cb178345ab4408702ec6de5fe9910924bfae96d7 /drivers
parent     59c752c59f12cfb56bbc0b213a4d24407959688a (diff)
parent     3b86c87f8dcbb8ba3b00d7adf7ccfff086f0f23e (diff)
Merge branch 'for-6.19/hid-bpf' into for-linus
- Bring in a couple more BPF drivers for various devices (Benjamin Tissoires)
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_mrrm.c46
-rw-r--r--drivers/acpi/acpi_video.c4
-rw-r--r--drivers/acpi/acpica/tbprint.c6
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/cppc_acpi.c8
-rw-r--r--drivers/acpi/fan.h7
-rw-r--r--drivers/acpi/fan_attr.c2
-rw-r--r--drivers/acpi/fan_core.c36
-rw-r--r--drivers/acpi/fan_hwmon.c11
-rw-r--r--drivers/acpi/numa/hmat.c46
-rw-r--r--drivers/acpi/numa/srat.c2
-rw-r--r--drivers/acpi/property.c2
-rw-r--r--drivers/acpi/riscv/rimt.c122
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/android/binder.c38
-rw-r--r--drivers/android/binder/freeze.rs18
-rw-r--r--drivers/android/binder/node.rs2
-rw-r--r--drivers/android/binder/process.rs50
-rw-r--r--drivers/android/binder/transaction.rs6
-rw-r--r--drivers/base/arch_topology.c2
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/devcoredump.c136
-rw-r--r--drivers/base/regmap/regmap-slimbus.c6
-rw-r--r--drivers/bcma/main.c6
-rw-r--r--drivers/block/nbd.c15
-rw-r--r--drivers/block/null_blk/main.c1
-rw-r--r--drivers/bluetooth/bpa10x.c4
-rw-r--r--drivers/bluetooth/btintel_pcie.c11
-rw-r--r--drivers/bluetooth/btmtksdio.c12
-rw-r--r--drivers/bluetooth/btmtkuart.c4
-rw-r--r--drivers/bluetooth/btnxpuart.c4
-rw-r--r--drivers/bluetooth/btrtl.c28
-rw-r--r--drivers/bluetooth/btusb.c13
-rw-r--r--drivers/bluetooth/hci_ag6xx.c2
-rw-r--r--drivers/bluetooth/hci_aml.c2
-rw-r--r--drivers/bluetooth/hci_ath.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c2
-rw-r--r--drivers/bluetooth/hci_h4.c6
-rw-r--r--drivers/bluetooth/hci_intel.c2
-rw-r--r--drivers/bluetooth/hci_ll.c2
-rw-r--r--drivers/bluetooth/hci_mrvl.c6
-rw-r--r--drivers/bluetooth/hci_nokia.c4
-rw-r--r--drivers/bluetooth/hci_qca.c2
-rw-r--r--drivers/bluetooth/hci_uart.h2
-rw-r--r--drivers/char/tpm/tpm_crb.c29
-rw-r--r--drivers/comedi/comedi_buf.c2
-rw-r--r--drivers/cpufreq/amd-pstate.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c9
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c5
-rw-r--r--drivers/cpuidle/governors/menu.c28
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c2
-rw-r--r--drivers/crypto/hisilicon/qm.c2
-rw-r--r--drivers/cxl/acpi.c2
-rw-r--r--drivers/cxl/core/features.c3
-rw-r--r--drivers/cxl/core/port.c26
-rw-r--r--drivers/cxl/core/region.c13
-rw-r--r--drivers/cxl/core/trace.h2
-rw-r--r--drivers/dma-buf/dma-fence.c2
-rw-r--r--drivers/dpll/dpll_netlink.c36
-rw-r--r--drivers/dpll/zl3073x/dpll.c2
-rw-r--r--drivers/edac/altera_edac.c22
-rw-r--r--drivers/edac/versalnet_edac.c26
-rw-r--r--drivers/firewire/core-card.c2
-rw-r--r--drivers/firewire/core-topology.c3
-rw-r--r--drivers/firewire/core-transaction.c2
-rw-r--r--drivers/firewire/init_ohci1394_dma.c10
-rw-r--r--drivers/firmware/arm_ffa/driver.c37
-rw-r--r--drivers/firmware/arm_scmi/common.h32
-rw-r--r--drivers/firmware/arm_scmi/driver.c59
-rw-r--r--drivers/gpio/gpio-104-idio-16.c1
-rw-r--r--drivers/gpio/gpio-aggregator.c1
-rw-r--r--drivers/gpio/gpio-idio-16.c5
-rw-r--r--drivers/gpio/gpio-ljca.c14
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c1
-rw-r--r--drivers/gpio/gpio-regmap.c26
-rw-r--r--drivers/gpio/gpio-tb10x.c19
-rw-r--r--drivers/gpio/gpiolib-acpi-core.c31
-rw-r--r--drivers/gpio/gpiolib-swnode.c2
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c11
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h2
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c18
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h8
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml2
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c4
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c8
-rw-r--r--drivers/gpu/drm/drm_panic.c60
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c2
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c55
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c16
-rw-r--r--drivers/gpu/drm/imagination/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c24
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c18
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c14
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c7
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c18
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c1
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c40
-rw-r--r--drivers/gpu/drm/tiny/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c12
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_device.c14
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c3
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c14
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c3
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c45
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c17
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c100
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c11
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c563
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c29
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c1395
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c724
-rw-r--r--drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c86
-rw-r--r--drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c321
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c305
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco02.bpf.c359
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h10
-rw-r--r--drivers/hwmon/cgbc-hwmon.c3
-rw-r--r--drivers/hwmon/gpd-fan.c64
-rw-r--r--drivers/hwmon/pmbus/isl68137.c3
-rw-r--r--drivers/hwmon/pmbus/max34440.c12
-rw-r--r--drivers/hwmon/sht3x.c27
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c50
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c11
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c58
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c2
-rw-r--r--drivers/infiniband/hw/irdma/pble.c2
-rw-r--r--drivers/infiniband/hw/irdma/type.h2
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c1
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c11
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c12
-rw-r--r--drivers/iommu/iommufd/ioas.c4
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c5
-rw-r--r--drivers/irqchip/irq-riscv-intc.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c5
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c9
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c30
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h8
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c11
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c22
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h6
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c15
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/misc/amd-sbi/Kconfig2
-rw-r--r--drivers/misc/fastrpc.c2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/mei_lb.c3
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/pci-txe.c14
-rw-r--r--drivers/misc/vmw_balloon.c8
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c4
-rw-r--r--drivers/mmc/host/pxamci.c56
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c2
-rw-r--r--drivers/most/most_usb.c13
-rw-r--r--drivers/mtd/mtdchar.c6
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/ecc-realtek.c6
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c2
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c3
-rw-r--r--drivers/mtd/nand/spi/fmsh.c2
-rw-r--r--drivers/net/bonding/bond_main.c52
-rw-r--r--drivers/net/bonding/bond_options.c9
-rw-r--r--drivers/net/can/bxcan.c2
-rw-r--r--drivers/net/can/dev/netlink.c6
-rw-r--r--drivers/net/can/esd/esdacc.c2
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c36
-rw-r--r--drivers/net/dsa/b53/b53_regs.h3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c98
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c7
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c25
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c15
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c34
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c24
-rw-r--r--drivers/net/ethernet/sfc/mae.c4
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c63
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c7
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c10
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h4
-rw-r--r--drivers/net/mctp/mctp-usb.c8
-rw-r--r--drivers/net/mdio/mdio-airoha.c2
-rw-r--r--drivers/net/netconsole.c31
-rw-r--r--drivers/net/ovpn/tcp.c26
-rw-r--r--drivers/net/phy/dp83867.c6
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/mdio_bus.c5
-rw-r--r--drivers/net/phy/micrel.c179
-rw-r--r--drivers/net/phy/realtek/realtek_main.c16
-rw-r--r--drivers/net/usb/asix_devices.c12
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/rtl8150.c11
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c67
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c40
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c156
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c12
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c71
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/nvme/target/auth.c5
-rw-r--r--drivers/nvmem/rcar-efuse.c1
-rw-r--r--drivers/of/irq.c44
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c28
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c100
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aspm.c59
-rw-r--r--drivers/pci/probe.c20
-rw-r--r--drivers/pci/quirks.c42
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/pci/vgaarb.c6
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c1
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c12
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c12
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c5
-rw-r--r--drivers/platform/x86/intel/int3472/led.c2
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c13
-rw-r--r--drivers/pmdomain/imx/gpc.c2
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c29
-rw-r--r--drivers/ptp/ptp_chardev.c4
-rw-r--r--drivers/ptp/ptp_ocp.c2
-rw-r--r--drivers/regulator/bd718x7-regulator.c2
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/rtc/rtc-cpcap.c1
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-tps6586x.c1
-rw-r--r--drivers/scsi/hosts.c5
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c8
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/storvsc_drv.c96
-rw-r--r--drivers/soc/ti/knav_dma.c14
-rw-r--r--drivers/spi/spi-airoha-snfi.c128
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c4
-rw-r--r--drivers/spi/spi-cadence-quadspi.c5
-rw-r--r--drivers/spi/spi-dw-mmio.c4
-rw-r--r--drivers/spi/spi-imx.c15
-rw-r--r--drivers/spi/spi-intel-pci.c3
-rw-r--r--drivers/spi/spi-intel.c6
-rw-r--r--drivers/spi/spi-nxp-fspi.c32
-rw-r--r--drivers/spi/spi-rockchip-sfc.c12
-rw-r--r--drivers/spi/spi-xilinx.c2
-rw-r--r--drivers/spi/spi.c12
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c12
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c5
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c13
-rw-r--r--drivers/tee/qcomtee/Kconfig1
-rw-r--r--drivers/tee/qcomtee/call.c2
-rw-r--r--drivers/tee/qcomtee/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c11
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/sc16is7xx.c7
-rw-r--r--drivers/tty/serial/sh-sci.c14
-rw-r--r--drivers/ufs/core/ufs-sysfs.c2
-rw-r--r--drivers/ufs/core/ufs-sysfs.h1
-rw-r--r--drivers/ufs/core/ufshcd.c45
-rw-r--r--drivers/ufs/host/ufs-qcom.c15
-rw-r--r--drivers/ufs/host/ufshcd-pci.c70
-rw-r--r--drivers/usb/core/quirks.c2
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c3
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c15
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c6
-rw-r--r--drivers/vfio/vfio_iommu_type1.c173
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c8
-rw-r--r--drivers/video/fbdev/core/bitblit.c16
-rw-r--r--drivers/video/fbdev/core/fbcon.c19
-rw-r--r--drivers/video/fbdev/core/fbmem.c1
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
448 files changed, 7736 insertions, 2095 deletions
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 47ea3ccc2142..6d69554c940e 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
+ if (mrrm->header.revision != 1)
+ return -EINVAL;
+
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;
@@ -149,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
static __init int add_boot_memory_ranges(void)
{
- struct kobject *pkobj, *kobj;
+ struct kobject *pkobj, *kobj, **kobjs;
int ret = -EINVAL;
- char *name;
+ char name[16];
+ int i;
pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+ if (!pkobj)
+ return -ENOMEM;
- for (int i = 0; i < mrrm_mem_entry_num; i++) {
- name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name) {
- ret = -ENOMEM;
- break;
- }
+ kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+ if (!kobjs) {
+ kobject_put(pkobj);
+ return -ENOMEM;
+ }
+ for (i = 0; i < mrrm_mem_entry_num; i++) {
+ scnprintf(name, sizeof(name), "range%d", i);
kobj = kobject_create_and_add(name, pkobj);
+ if (!kobj) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
ret = sysfs_create_groups(kobj, memory_range_groups);
- if (ret)
- return ret;
+ if (ret) {
+ kobject_put(kobj);
+ goto cleanup;
+ }
+ kobjs[i] = kobj;
}
+ kfree(kobjs);
+ return 0;
+
+cleanup:
+ for (int j = 0; j < i; j++) {
+ if (kobjs[j]) {
+ sysfs_remove_groups(kobjs[j], memory_range_groups);
+ kobject_put(kobjs[j]);
+ }
+ }
+ kfree(kobjs);
+ kobject_put(pkobj);
return ret;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 103f29661576..be8e7e18abca 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
- list_for_each_entry(dev, &video->video_device_list, entry)
+ list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
+ }
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 049f6c2f1e32..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0a7026040188..3c6dd9b4ba0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
- if (error)
+ if (error) {
+ input_free_device(input);
goto err_remove_fs;
+ }
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index ab4651205e8a..3bdeeee3414e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
if (acpi_disabled)
return false;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -750,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
/*
- * Disregard _CPC if the number of entries in the return pachage is not
+ * Disregard _CPC if the number of entries in the return package is not
* as expected, but support future revisions being proper supersets of
* the v3 and only causing more entries to be returned by _CPC.
*/
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 8a28a72a7c6a..bedbab0e8e4e 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -49,6 +49,7 @@ struct acpi_fan_fst {
};
struct acpi_fan {
+ acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
-int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+int devm_acpi_fan_create_hwmon(struct device *dev);
#else
-static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif
#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index c1afb7b5ed3d..9b7fa52f3c2a 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
- status = acpi_fan_get_fst(acpi_dev, &fst);
+ status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 04ff608f2ff0..46e7fe7a506d 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "Get fan state failed\n");
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = buffer.pointer;
- if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
- obj->package.count != 3 ||
- obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
- dev_err(&device->dev, "Invalid _FST data\n");
- ret = -EINVAL;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EPROTO;
goto err;
}
@@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
- status = acpi_fan_get_fst(device, &fst);
+ status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
+ if (!device)
+ return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
+
+ fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
@@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
- result = devm_acpi_fan_create_hwmon(device);
+ result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index e8d90605106e..4b2c2007f2d7 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
- struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
- ret = acpi_fan_get_fst(adev, &fst);
+ ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
-int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+int devm_acpi_fan_create_hwmon(struct device *dev)
{
- struct acpi_fan *fan = acpi_driver_data(device);
+ struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;
- hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
- &acpi_fan_hwmon_chip_info, NULL);
+ hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hdev);
}
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 5a36d57289b4..11e4483685c9 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -874,11 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
}
}
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+ * "specific purpose", is applied to all the memory in a proximity
+ * domain leading to the node being marked offline / unplugged, or if
+ * memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ guard(mutex)(&target_lock);
+ if (target->registered)
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+ /*
* Devices may belong to either an offline or online
* node, so unconditionally add them.
*/
@@ -895,25 +917,7 @@ static void hmat_register_target(struct memory_target *target)
}
mutex_unlock(&target_lock);
- /*
- * Skip offline nodes. This can happen when memory
- * marked EFI_MEMORY_SP, "specific purpose", is applied
- * to all the memory in a proximity domain leading to
- * the node being marked offline / unplugged, or if
- * memory-only "hotplug" node is offline.
- */
- if (nid == NUMA_NO_NODE || !node_online(nid))
- return;
-
- mutex_lock(&target_lock);
- if (!target->registered) {
- hmat_register_target_initiators(target);
- hmat_register_target_cache(target);
- hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
- hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
- target->registered = true;
- }
- mutex_unlock(&target_lock);
+ hmat_hotplug_target(target);
}
static void hmat_register_targets(void)
@@ -939,7 +943,7 @@ static int hmat_callback(struct notifier_block *self,
if (!target)
return NOTIFY_OK;
- hmat_register_target(target);
+ hmat_hotplug_target(target);
return NOTIFY_OK;
}
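
A note on the locking in the hunk above: the new hmat_hotplug_target() uses the scope-based guard(mutex)() helper, so every early return drops target_lock automatically instead of pairing mutex_lock()/mutex_unlock() by hand. A minimal sketch of that pattern, with example_lock and example_registered as illustrative names rather than anything from this patch:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static bool example_registered;

static void example_register_once(void)
{
        guard(mutex)(&example_lock);    /* released automatically at scope exit */

        if (example_registered)
                return;                 /* early return needs no mutex_unlock() */

        /* ... one-time registration work goes here ... */
        example_registered = true;
}
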
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 53816dfab645..aa87ee1583a4 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -237,7 +237,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_generic_affinity *p =
(struct acpi_srat_generic_affinity *)header;
- if (p->device_handle_type == 0) {
+ if (p->device_handle_type == 1) {
/*
* For pci devices this may be the only place they
* are assigned a proximity domain
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 1b997a5497e7..43d5e457814e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
size_t num_args,
struct fwnode_reference_args *args)
{
- return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
index 683fcfe35c31..7f423405e5ef 100644
--- a/drivers/acpi/riscv/rimt.c
+++ b/drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
- struct fwnode_handle *fwnode = NULL;
- struct rimt_fwnode *curr;
-
- spin_lock(&rimt_fwnode_lock);
- list_for_each_entry(curr, &rimt_fwnode_list, list) {
- if (curr->rimt_node == node) {
- fwnode = curr->fwnode;
- break;
- }
- }
- spin_unlock(&rimt_fwnode_lock);
-
- return fwnode;
-}
-
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
- struct fwnode_handle *rimt_fwnode;
- struct acpi_rimt_node *node;
-
- node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
- if (!node) {
- pr_err("Could not find IOMMU node in RIMT\n");
- return -ENODEV;
- }
-
- if (dev_is_pci(dev)) {
- rimt_fwnode = acpi_alloc_fwnode_static();
- if (!rimt_fwnode)
- return -ENOMEM;
-
- rimt_fwnode->dev = dev;
- if (!dev->fwnode)
- dev->fwnode = rimt_fwnode;
-
- rimt_set_fwnode(node, rimt_fwnode);
- } else {
- rimt_set_fwnode(node, dev->fwnode);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
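
The comment block relocated earlier in this file's diff explains that a RISC-V IOMMU driver, whether a platform or a PCI device, must register itself so its RIMT node can be linked to a fwnode. A minimal sketch of such a caller, assuming a hypothetical riscv_iommu_example_probe() function that is not part of this patch; rimt_iommu_register() is the helper shown in the diff:

#include <linux/platform_device.h>

int rimt_iommu_register(struct device *dev);    /* provided by the RIMT code above */

static int riscv_iommu_example_probe(struct platform_device *pdev)
{
        int ret;

        /* Tie this IOMMU instance to its RIMT node so client devices can
         * later be resolved through the RIMT ID mappings. */
        ret = rimt_iommu_register(&pdev->dev);
        if (ret)
                return ret;

        /* ... continue with normal IOMMU setup ... */
        return 0;
}
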
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index a3f95a3fffde..d3edc3bcbf01 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -487,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
if (result)
return result;
- battery->present = state & (1 << battery->id);
+ battery->present = !!(state & (1 << battery->id));
if (!battery->present)
return 0;
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index d4d52d5e9016..73cb933fdc89 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
- if (table->precise_baudrate)
+ if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8c99ceaa303b..a3a1b5c33ba3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -2418,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
index e68c3c8bc55a..220de35ae85a 100644
--- a/drivers/android/binder/freeze.rs
+++ b/drivers/android/binder/freeze.rs
@@ -106,13 +106,22 @@ impl DeliverToRead for FreezeMessage {
return Ok(true);
}
if freeze.is_clearing {
- _removed_listener = freeze_entry.remove_node();
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
drop(node_refs);
writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
writer.write_payload(&self.cookie.0)?;
Ok(true)
} else {
- let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.last_is_frozen == Some(is_frozen) {
return Ok(true);
}
@@ -245,8 +254,9 @@ impl Process {
);
return Err(EINVAL);
}
- if freeze.is_clearing {
- // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
clear_msg = Some(FreezeMessage::init(alloc, cookie));
}
freeze.is_pending = false;
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index ade895ef791e..08d362deaf61 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -687,7 +687,7 @@ impl Node {
);
}
if inner.freeze_list.is_empty() {
- _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ _unused_capacity = mem::take(&mut inner.freeze_list);
}
}
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index f13a747e784c..7607353a5e92 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -72,6 +72,33 @@ impl Mapping {
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
+
/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
/// are woken up.
outstanding_txns: u32,
/// Process is frozen and unable to service binder transactions.
- pub(crate) is_frozen: bool,
+ pub(crate) is_frozen: IsFrozen,
/// Process received sync transactions since last frozen.
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ impl ProcessInner {
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
- is_frozen: false,
+ is_frozen: IsFrozen::No,
sync_recv: false,
async_recv: false,
binderfs_file: None,
@@ -1260,7 +1287,7 @@ impl Process {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
@@ -1346,10 +1373,6 @@ impl Process {
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
- pr_warn!(
- "{}: removing orphan mapping {offset}:{size}\n",
- self.pid_in_current_ns()
- );
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ impl Process {
return;
}
inner.outstanding_txns -= 1;
- inner.is_frozen && inner.outstanding_txns == 0
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
};
if wake {
@@ -1385,7 +1408,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
drop(inner);
msgs.send_messages();
return Ok(());
@@ -1394,7 +1417,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = true;
+ inner.is_frozen = IsFrozen::InProgress;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ impl Process {
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ impl Process {
}
if inner.txns_pending_locked() {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
- self.inner.lock().is_frozen = false;
+ self.inner.lock().is_frozen = IsFrozen::No;
Err(ENOMEM)
}
}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
index 02512175d622..4bd3c0e417eb 100644
--- a/drivers/android/binder/transaction.rs
+++ b/drivers/android/binder/transaction.rs
@@ -249,7 +249,7 @@ impl Transaction {
if oneway {
if let Some(target_node) = self.target_node.clone() {
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
if let Some(t_outdated) =
@@ -270,7 +270,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
return Err(BinderError::new_frozen_oneway());
} else {
return Ok(());
@@ -280,7 +280,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.sync_recv = true;
return Err(BinderError::new_frozen());
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+ * There are 2 races for which mutex is required.
*
+ * The first race is between device creation and userspace writing to
+ * schedule immediately destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+ * That way the timer will never fire until device_add() is called,
+ * it will do nothing if init_completed is not set. The timer is also
+ * cancelled in that case.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free(),
+ * add a deleted flag so only 1 can call the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+ * If nothing interferes and device_add() returned success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere in the working of the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+ * This is handled by using "if (cancel_delayed_work()) {
+ * schedule_delayed_work() }", to prevent re-arming after having
+ * been previously fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking if deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+ * Although it's tempting to use mod_delayed work here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable work and
+ * complete manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
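
The reordered error path above drops devcd->mutex before cancelling the work and only then drops the reference. A sketch of why that ordering matters, assuming (as the init_completed gate in the earlier hunk implies) that devcd_del() takes devcd->mutex; the helper name is illustrative:

    /*
     * Failure path after device_add() fails.  The timer was armed before
     * device_add(), so it must be cancelled synchronously before the
     * reference is dropped; and because devcd_del() is assumed to take
     * devcd->mutex, waiting for it while still holding the mutex could
     * deadlock, hence the unlock comes first.
     */
    static void devcd_add_failed(struct devcd_entry *devcd)
    {
            mutex_unlock(&devcd->mutex);
            cancel_delayed_work_sync(&devcd->del_wk);
            put_device(&devcd->devcd_dev);
    }
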
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 54eb7d227cf4..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
- lock_key, lock_name);
+ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
- lock_key, lock_name);
+ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6ecfc821cf83..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1188f32a5e5e..a853c65ac65d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -52,6 +52,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
+static struct cred *nbd_cred;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
int result;
struct msghdr msg = {} ;
unsigned int noreclaim_flag;
+ const struct cred *old_cred;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
return -EINVAL;
}
+ old_cred = override_creds(nbd_cred);
+
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
memalloc_noreclaim_restore(noreclaim_flag);
+ revert_creds(old_cred);
+
return result;
}
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
return -ENOMEM;
}
+ nbd_cred = prepare_kernel_cred(&init_task);
+ if (!nbd_cred) {
+ destroy_workqueue(nbd_del_wq);
+ unregister_blkdev(NBD_MAJOR, "nbd");
+ return -ENOMEM;
+ }
+
if (genl_register_family(&nbd_genl_family)) {
+ put_cred(nbd_cred);
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
/* Also wait for nbd_dev_remove_work() to complete */
destroy_workqueue(nbd_del_wq);
+ put_cred(nbd_cred);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}
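
The nbd hunks prepare one kernel credential at module init and wrap every socket transfer in it, so the I/O no longer depends on the credentials of whichever task issued it. A minimal sketch of that override pattern; the wrapper and its callback are illustrative, only nbd_cred and the cred API calls come from the hunks:

    #include <linux/cred.h>
    #include <linux/sched.h>        /* init_task */

    static struct cred *nbd_cred;   /* prepare_kernel_cred(&init_task) in nbd_init() */

    /* Run one transfer under the prepared kernel credentials. */
    static long demo_xmit_as_kernel(long (*xmit)(void *arg), void *arg)
    {
            const struct cred *old_cred = override_creds(nbd_cred);
            long ret = xmit(arg);

            revert_creds(old_cred);
            return ret;
    }
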
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index f982027e8c85..0ee55f889cfd 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+ .dma_alignment = dev->blocksize - 1,
};
struct nullb *nullb;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index b7ba667a3d09..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
+ data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6d3963bd56a9..a075d8ec4677 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}
+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 50abefba6d04..62db31bd6592 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}
+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index d9b90ea2ad38..27aa48ff3ac2 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
+ struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index d5153fed0518..3b1e9224e965 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
+ struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);
nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 6abd962502e3..52794db2739b 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -50,7 +50,7 @@
#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
-#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
+#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0xAD, 0x00, 0xb0}})
#define RTL_PATCH_SNIPPETS 0x01
#define RTL_PATCH_DUMMY_HEADER 0x02
@@ -534,7 +534,6 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
{
struct rtl_epatch_header_v2 *hdr;
int rc;
- u8 reg_val[2];
u8 key_id;
u32 num_sections;
struct rtl_section *section;
@@ -549,14 +548,7 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
.len = btrtl_dev->fw_len - 7, /* Cut the tail */
};
- rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
- if (rc < 0)
- return -EIO;
- key_id = reg_val[0];
-
- rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
-
- btrtl_dev->key_id = key_id;
+ key_id = btrtl_dev->key_id;
hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
if (!hdr)
@@ -625,8 +617,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
- if (!len)
+ if (!len) {
+ kvfree(ptr);
return -EPERM;
+ }
*_buf = ptr;
return len;
@@ -1068,6 +1062,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
+ int rc;
+ u8 key_id;
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@@ -1178,6 +1174,14 @@ next:
goto err_free;
}
+ rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
+ if (rc < 0)
+ goto err_free;
+
+ key_id = reg_val[0];
+ btrtl_dev->key_id = key_id;
+ rtl_dev_info(hdev, "%s: key id %u", __func__, key_id);
+
btrtl_dev->fw_len = -EIO;
if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
@@ -1200,7 +1204,7 @@ next:
goto err_free;
}
- if (btrtl_dev->ic_info->cfg_name) {
+ if (btrtl_dev->ic_info->cfg_name && !btrtl_dev->key_id) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5e9ebf0c5312..a722446ec73d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4361,6 +4361,11 @@ static void btusb_disconnect(struct usb_interface *intf)
hci_unregister_dev(hdev);
+ if (data->oob_wake_irq)
+ device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
if (intf == data->intf) {
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
@@ -4371,17 +4376,11 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->diag);
usb_driver_release_interface(&btusb_driver, data->intf);
} else if (intf == data->diag) {
- usb_driver_release_interface(&btusb_driver, data->intf);
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
+ usb_driver_release_interface(&btusb_driver, data->intf);
}
- if (data->oob_wake_irq)
- device_init_wakeup(&data->udev->dev, false);
-
- if (data->reset_gpio)
- gpiod_put(data->reset_gpio);
-
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 2d40302409ff..94588676510f 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index 707e90f80130..b1f32c5a8a3f 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;
- aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index dbfe34664633..8d2b5e7f0d6a 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index f96617b85d87..fff845ed44e3 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 9070e31a68bf..ec017df8572c 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9b353c3d6442..1d6e09508f1f 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 7044c86325ce..6f4e25917b86 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index e08222395772..8767522ec4c6 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index cd7575c20f65..1e65b541f8ad 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4cff4d9be313..888176b0faa9 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index cbbe79b241ce..48ac7ca9334e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -162,7 +162,7 @@ struct h4_recv_pkt {
int h4_init(void);
int h4_deinit(void);
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
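
With the new prototype, h4_recv_buf() takes the struct hci_uart directly instead of deriving it from hci_get_drvdata(hdev), which is why the USB and serdev drivers above now embed their own struct hci_uart and set only its hdev field in probe. A minimal sketch of a caller under the new signature; the demo_* names and packet table are illustrative:

    #include <net/bluetooth/bluetooth.h>
    #include <net/bluetooth/hci_core.h>
    #include "hci_uart.h"   /* struct hci_uart, h4_recv_buf(), H4_RECV_* */

    static const struct h4_recv_pkt demo_recv_pkts[] = {
            { H4_RECV_ACL,   .recv = hci_recv_frame },
            { H4_RECV_SCO,   .recv = hci_recv_frame },
            { H4_RECV_EVENT, .recv = hci_recv_frame },
    };

    struct demo_bt_dev {
            struct hci_dev *hdev;
            struct sk_buff *rx_skb;
            struct hci_uart hu;     /* probe sets hu.hdev = hdev; .alignment may stay 0 */
    };

    static int demo_bt_recv(struct demo_bt_dev *dev, const void *data, int count)
    {
            dev->rx_skb = h4_recv_buf(&dev->hu, dev->rx_skb, data, count,
                                      demo_recv_pkts, ARRAY_SIZE(demo_recv_pkts));
            if (IS_ERR(dev->rx_skb)) {
                    int err = PTR_ERR(dev->rx_skb);

                    dev->rx_skb = NULL;
                    return err;
            }

            return count;
    }
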
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ed97344f2324..c75a531cfb98 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method)
{
return !(start_method == ACPI_TPM2_START_METHOD ||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
- start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
- start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
}
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
@@ -191,7 +190,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* Return: 0 always
*/
-static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -200,6 +199,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -220,7 +225,7 @@ static int crb_go_idle(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_go_idle(dev, priv);
+ return __crb_go_idle(dev, priv, chip->locality);
}
/**
@@ -238,7 +243,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -247,6 +252,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -267,7 +278,7 @@ static int crb_cmd_ready(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_cmd_ready(dev, priv);
+ return __crb_cmd_ready(dev, priv, chip->locality);
}
static int __crb_request_locality(struct device *dev,
@@ -444,7 +455,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
/* Seems to be necessary for every command */
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
- __crb_cmd_ready(&chip->dev, priv);
+ __crb_cmd_ready(&chip->dev, priv, chip->locality);
memcpy_toio(priv->cmd, buf, len);
@@ -672,7 +683,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = __crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv, 0);
if (ret)
goto out_relinquish_locality;
@@ -744,7 +755,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- __crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv, 0);
out_relinquish_locality:
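
Both CRB helpers now carry the locality down to the FF-A start call: runtime callers pass chip->locality, while the probe-time mapping path uses locality 0. A condensed sketch of the shared shape of __crb_cmd_ready()/__crb_go_idle() after this change; the helper name is illustrative and tpm_crb_ffa_start()'s (type, locality) arguments are taken from the hunks:

    /* Write the control request, then kick the FF-A backend for @loc. */
    static int demo_crb_request(struct crb_priv *priv, int loc, bool ready)
    {
            iowrite32(ready ? CRB_CTRL_REQ_CMD_READY : CRB_CTRL_REQ_GO_IDLE,
                      &priv->regs_t->ctrl_req);

            if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA)
                    return tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);

            return 0;
    }
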
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 002c0e76baff..c7c262a2d8ca 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 298e92d8cc03..b44f0f7a5ba1 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 38897bb14a2c..492a10f1bdbf 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -603,9 +603,6 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- if (!cpu_feature_enabled(X86_FEATURE_IDA))
- return true;
-
rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -2106,7 +2103,8 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -2271,7 +2269,8 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
val |= (u64)1 << 32;
return val;
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index a360bc4d20b7..19be6475d356 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -303,8 +304,8 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
- strcpy(drv->states[0].name, "WFI");
- strcpy(drv->states[0].desc, "RISC-V WFI");
+ strscpy(drv->states[0].name, "WFI");
+ strscpy(drv->states[0].desc, "RISC-V WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 4d9aa5ce31f0..23239b0c04f9 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -188,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
+ *
+ * However, if the number of remaining samples is too small to exclude
+ * any more outliers, allow the deepest available idle state to be
+ * selected because there are systems where the time spent by CPUs in
+ * deep idle states is correlated to the maximum frequency the CPUs
+ * can get to. On those systems, shallow idle states should be avoided
+ * unless there is a clear indication that the given CPU is most likely
+ * going to be woken up shortly.
*/
- if (divisor * 4 <= INTERVALS * 3) {
- /*
- * If there are sufficiently many data points still under
- * consideration after the outliers have been eliminated,
- * returning without a prediction would be a mistake because it
- * is likely that the next interval will not exceed the current
- * maximum, so return the latter in that case.
- */
- if (divisor >= INTERVALS / 2)
- return max;
-
+ if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
- }
/* Update the thresholds for the next round. */
if (avg - min > max - avg)
@@ -321,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/*
* Use a physical idle state, not busy polling, unless a timer
- * is going to trigger soon enough.
+ * is going to trigger soon enough or the exit latency of the
+ * idle state in question is greater than the predicted idle
+ * duration.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->target_residency_ns <= data->next_timer_ns) {
+ s->target_residency_ns <= data->next_timer_ns &&
+ s->exit_latency_ns <= predicted_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
break;
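
For scale: INTERVALS is 8 in this governor, so the divisor * 4 <= INTERVALS * 3 cut-off above triggers once two or more of the eight recent intervals have been discarded as outliers (divisor <= 6), at which point the estimate is abandoned and the deepest state may be picked. A small stand-alone check of that threshold, plain user-space C rather than governor code:

    #include <stdio.h>

    #define INTERVALS 8     /* size of the governor's sample window */

    int main(void)
    {
            for (int divisor = INTERVALS; divisor > 0; divisor--)
                    printf("%d samples left -> %s\n", divisor,
                           divisor * 4 <= INTERVALS * 3 ?
                           "no prediction (deepest state allowed)" :
                           "keep refining the estimate");
            return 0;
    }
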
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 8d1c79aaca07..5993bcba9716 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
- clk_disable_unprepare(acry_dev->clk);
return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
- clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a5b96adf2d1e..3b391a146635 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -3871,10 +3871,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
pdev = container_of(dev, struct pci_dev, dev);
if (pci_physfn(pdev) != qm->pdev) {
pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ put_device(dev);
return -EINVAL;
}
*fun_index = pdev->devfn;
+ put_device(dev);
return 0;
}
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index d7a5539d07d4..bd2e282ca93a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -348,7 +348,7 @@ static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
struct resource res;
int nid, rc;
- res = DEFINE_RES(start, size, 0);
+ res = DEFINE_RES_MEM(start, size);
nid = phys_to_target_node(start);
rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 7c750599ea69..4bc484b46f43 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs,
{
struct cxl_feat_entry *feat;
+ if (!cxlfs || !cxlfs->entries)
+ return ERR_PTR(-EOPNOTSUPP);
+
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
if (uuid_equal(uuid, &feat->uuid))
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index d5f71eb1ade8..8128fd2b5b31 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1182,6 +1182,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
+ /*
+ * Set up the port registers if this is the first dport to show up. Having
+ * a dport also means that there is at least 1 active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc) {
+ xa_erase(&port->dports, (unsigned long)dport->dport_dev);
+ return ERR_PTR(rc);
+ }
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
get_device(dport_dev);
rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
if (rc)
@@ -1200,18 +1214,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
cxl_debugfs_create_dport_dir(dport);
- /*
- * Setup port register if this is the first dport showed up. Having
- * a dport also means that there is at least 1 active link.
- */
- if (port->nr_dports == 1 &&
- port->component_reg_phys != CXL_RESOURCE_NONE) {
- rc = cxl_port_setup_regs(port, port->component_reg_phys);
- if (rc)
- return ERR_PTR(rc);
- port->component_reg_phys = CXL_RESOURCE_NONE;
- }
-
return dport;
}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e14c1d305b22..41b64d871c5a 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -839,7 +839,7 @@ static int match_free_decoder(struct device *dev, const void *data)
}
static bool region_res_match_cxl_range(const struct cxl_region_params *p,
- struct range *range)
+ const struct range *range)
{
if (!p->res)
return false;
@@ -3398,10 +3398,7 @@ static int match_region_by_range(struct device *dev, const void *data)
p = &cxlr->params;
guard(rwsem_read)(&cxl_rwsem.region);
- if (p->res && p->res->start == r->start && p->res->end == r->end)
- return 1;
-
- return 0;
+ return region_res_match_cxl_range(p, r);
}
static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
@@ -3666,14 +3663,14 @@ static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
if (offset < p->cache_size) {
dev_err(&cxlr->dev,
- "Offset %#llx is within extended linear cache %pr\n",
+ "Offset %#llx is within extended linear cache %pa\n",
offset, &p->cache_size);
return -EINVAL;
}
region_size = resource_size(p->res);
if (offset >= region_size) {
- dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pr\n",
+ dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n",
offset, &region_size);
return -EINVAL;
}
@@ -3705,6 +3702,7 @@ static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
if (validate_region_offset(cxlr, offset))
return -EINVAL;
+ offset -= cxlr->params.cache_size;
rc = region_offset_to_dpa_result(cxlr, offset, &result);
if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
dev_dbg(&cxlr->dev,
@@ -3737,6 +3735,7 @@ static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
if (validate_region_offset(cxlr, offset))
return -EINVAL;
+ offset -= cxlr->params.cache_size;
rc = region_offset_to_dpa_result(cxlr, offset, &result);
if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
dev_dbg(&cxlr->dev,
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index a53ec4798b12..a972e4ef1936 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -1068,7 +1068,7 @@ TRACE_EVENT(cxl_poison,
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
- __entry->hpa_alias0 = __entry->hpa +
+ __entry->hpa_alias0 = __entry->hpa -
cxlr->params.cache_size;
else
__entry->hpa_alias0 = ULLONG_MAX;
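
The region and trace hunks treat the extended linear cache as occupying the low cache_size bytes of the region address space: a user-supplied offset below cache_size is rejected, a valid offset is rebased by cache_size before the DPA lookup, and the near-memory alias of an HPA sits cache_size below it (hence the sign flip above). A worked example with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t cache_size = 512ULL << 20;     /* extended linear cache size  */
            uint64_t offset     = 768ULL << 20;     /* user-supplied region offset */
            uint64_t hpa        = 0x1050000000ULL;  /* HPA in the CXL-backed half  */

            if (offset < cache_size)
                    return 1;       /* "within extended linear cache", rejected */

            printf("DPA-relative offset: %#llx\n",
                   (unsigned long long)(offset - cache_size));
            printf("hpa_alias0:          %#llx\n",
                   (unsigned long long)(hpa - cache_size));
            return 0;
    }
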
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3f78c56b58dc..39e6f93dc310 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return fence->ops->get_driver_name(fence);
+ return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 74c1f0ca95f2..a4153bcb6dcf 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
- if (!IS_ERR(pin)) {
- if (!dpll_pin_available(pin)) {
- nlmsg_free(msg);
- return -ENODEV;
- }
- ret = dpll_msg_add_pin_handle(msg, pin);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(pin)) {
+ nlmsg_free(msg);
+ return PTR_ERR(pin);
+ }
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
dpll = dpll_device_find_from_nlattr(info);
- if (!IS_ERR(dpll)) {
- ret = dpll_msg_add_dev_handle(msg, dpll);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(dpll)) {
+ nlmsg_free(msg);
+ return PTR_ERR(dpll);
+ }
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 93dc93eec79e..f93f9a458324 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
}
is_diff = zl3073x_out_is_diff(zldev, out);
- is_enabled = zl3073x_out_is_enabled(zldev, out);
+ is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
}
/* Skip N-pin if the corresponding input/output is differential */
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 103b2c2eba2a..0c5b94e64ea1 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1184,10 +1184,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
if (ret)
return ret;
- /* Verify OCRAM has been initialized */
+ /*
+ * Verify that OCRAM has been initialized.
+ * During a warm reset, OCRAM contents are retained, but the control
+ * and status registers are reset to their default values. Therefore,
+ * ECC must be explicitly re-enabled in the control register.
+ * Error condition: if INITCOMPLETEA is clear and ECC_EN is already set.
+ */
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
- (base + ALTR_A10_ECC_INITSTAT_OFST)))
- return -ENODEV;
+ (base + ALTR_A10_ECC_INITSTAT_OFST))) {
+ if (!ecc_test_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST)))
+ ecc_set_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST));
+ else
+ return -ENODEV;
+ }
/* Enable IRQ on Single Bit Error */
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
@@ -1357,7 +1369,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
@@ -1447,7 +1459,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_USB */
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
index 7c5db8bf0595..1a1092793092 100644
--- a/drivers/edac/versalnet_edac.c
+++ b/drivers/edac/versalnet_edac.c
@@ -433,7 +433,7 @@ static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
phys_addr_t pfn;
int err;
- if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS))
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
return;
mci = priv->mci[ctl_num];
@@ -605,21 +605,23 @@ static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
length = result[MSG_ERR_LENGTH];
offset = result[MSG_ERR_OFFSET];
+ /*
+ * The data can come in two stretches. Construct the regs from two
+ * messages; the offset gives the position in the regs buffer at
+ * which this stretch of data is placed.
+ */
+ for (i = 0 ; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+
if (result[TOTAL_ERR_LENGTH] > length) {
if (!mc_priv->part_len)
mc_priv->part_len = length;
else
mc_priv->part_len += length;
- /*
- * The data can come in 2 stretches. Construct the regs from 2
- * messages the offset indicates the offset from which the data is to
- * be taken
- */
- for (i = 0 ; i < length; i++) {
- k = offset + i;
- j = ERROR_DATA + i;
- mc_priv->regs[k] = result[j];
- }
+
if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
return 0;
mc_priv->part_len = 0;
@@ -705,7 +707,7 @@ static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
/* Convert to bytes */
length = result[TOTAL_ERR_LENGTH] * 4;
log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
- sec_sev, (void *)&result[ERROR_DATA], length);
+ sec_sev, (void *)&mc_priv->regs, length);
return 0;
}
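
A small sketch of the reassembly described in the new comment: every message's payload is copied into the shared register buffer at the offset it carries, and the report is only consumed once the accumulated length reaches TOTAL_ERR_LENGTH. The function shape is illustrative; the index names come from the hunks:

    /* Returns true once regs[] holds the complete report. */
    static bool demo_collect(u32 *regs, u32 *part_len, const u32 *result)
    {
            u32 length = result[MSG_ERR_LENGTH];
            u32 offset = result[MSG_ERR_OFFSET];
            u32 i;

            for (i = 0; i < length; i++)
                    regs[offset + i] = result[ERROR_DATA + i];

            if (result[TOTAL_ERR_LENGTH] > length) {
                    *part_len += length;
                    if (*part_len < result[TOTAL_ERR_LENGTH])
                            return false;   /* wait for the second stretch */
            }

            *part_len = 0;
            return true;
    }
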
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index e5e0174a0335..66e1106db5e7 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -577,6 +577,8 @@ void fw_card_initialize(struct fw_card *card,
INIT_LIST_HEAD(&card->transactions.list);
spin_lock_init(&card->transactions.lock);
+ spin_lock_init(&card->topology_map.lock);
+
card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 2f73bcd5696f..ed3ae8cdb0cd 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -441,12 +441,13 @@ static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_nod
const u32 *self_ids, int self_id_count)
{
__be32 *map = buffer;
+ u32 next_generation = be32_to_cpu(buffer[1]) + 1;
int node_count = (root_node_id & 0x3f) + 1;
memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
+ *map++ = cpu_to_be32(next_generation);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index dd3656a0c1ff..c65f491c54d0 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
-__must_hold(&card->transactions_lock)
+__must_hold(&card->transactions.lock)
{
int tlabel;
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 65bf1685350a..c72ee4756585 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+ memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+ mem_region->ep_mem_size = 0;
+ } else {
+ mem_region->ep_mem_size = ffa_emad_size_get(version);
+ mem_region->ep_mem_offset = sizeof(*mem_region);
+ memset(mem_region->reserved, 0, 12);
+ }
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
- ep_mem_access = buffer +
- ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
- for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ for (idx = 0; idx < args->nattrs; idx++) {
+ ep_mem_access = buffer +
+ ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
+ ffa_emad_impdef_value_init(drv_info->version,
+ ep_mem_access->impdef_val,
+ args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
- if (drv_info->version <= FFA_VERSION_1_0) {
- mem_region->ep_mem_size = 0;
- } else {
- mem_region->ep_mem_size = sizeof(*ep_mem_access);
- mem_region->ep_mem_offset = sizeof(*mem_region);
- memset(mem_region->reserved, 0, 12);
- }
+ ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 07b9e629276d..7c35c95fddba 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -309,16 +309,36 @@ enum debug_counters {
SCMI_DEBUG_COUNTERS_LAST
};
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_inc(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_inc(&dbg->counters[stat]);
+ }
}
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_dec(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_dec(&dbg->counters[stat]);
+ }
}
enum scmi_bad_msg {
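
With the helpers taking the scmi_debug_info pointer itself, call sites stay correct even when debugfs setup failed and info->dbg is NULL. A minimal usage sketch; the wrapper name is illustrative, the counter ids come from enum debug_counters above:

    /* NULL-safe, and a no-op when CONFIG_ARM_SCMI_DEBUG_COUNTERS=n. */
    static void demo_count_send_result(struct scmi_debug_info *dbg, int ret)
    {
            scmi_inc_count(dbg, ret ? SENT_FAIL : SENT_OK);
    }
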
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index bd56a877fdfc..5caa9191a8d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -116,22 +116,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
- struct dentry *top_dentry;
- const char *name;
- const char *type;
- bool is_atomic;
- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
- scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_inc_count(info->dbg, XFERS_INFLIGHT);
xfer->pending = true;
}
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
hash_del(&xfer->node);
xfer->pending = false;
- scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
+ xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+ scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+ scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+ scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
- scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+ scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
- scmi_inc_count(info->dbg->counters, SENT_FAIL);
+ scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
- scmi_inc_count(info->dbg->counters, SENT_OK);
+ scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
- if (!info->dbg)
- return -EINVAL;
-
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
- if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
struct scmi_info *info = handle_to_scmi_info(handle);
+ if (!info->dbg)
+ return 0;
+
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
} else {
return 0;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ffe7e1cb6b23..fe5c10cd5c32 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 37600faf4a4b..416f265d09d0 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -723,6 +723,7 @@ struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->set_config = gpio_fwd_set_config;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 0103be977c66..4fbae6f6a497 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
+ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
+ gpio_config.fixed_direction_output = fixed_direction_output;
+
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 3b4f8830c741..f32d1d237795 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i, irq;
+ int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
- packet->item[i].index);
- if (!irq) {
- dev_err(ljca_gpio->gc.parent,
- "gpio_id %u does not mapped to IRQ yet\n",
- packet->item[i].index);
- return;
- }
-
- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}
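
The ljca fix relies on generic_handle_domain_irq() taking the hardware IRQ number and doing the domain lookup itself; the removed code passed it the Linux virq from irq_find_mapping(), i.e. performed the lookup twice with the wrong key on the second pass. A minimal sketch of the equivalence (the function name is illustrative):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static void demo_dispatch(struct irq_domain *domain, unsigned int hwirq)
    {
            /* Looks up the hwirq -> virq mapping internally and handles it... */
            generic_handle_domain_irq(domain, hwirq);
            /*
             * ...roughly equivalent to, but cheaper than:
             * generic_handle_irq(irq_find_mapping(domain, hwirq));
             */
    }
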
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 476cea1b5ed7..9d28ca8e1d6f 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index ab9e4077fa60..f4267af00027 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -31,6 +31,7 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+ unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
int regmap_irq_line;
@@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->fixed_direction_output) {
+ if (test_bit(offset, gpio->fixed_direction_output))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
goto err_free_gpio;
}
+ if (config->fixed_direction_output) {
+ gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+ GFP_KERNEL);
+ if (!gpio->fixed_direction_output) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+ bitmap_copy(gpio->fixed_direction_output,
+ config->fixed_direction_output, chip->ngpio);
+ }
+
/* if not set, assume there is only one register */
gpio->ngpio_per_reg = config->ngpio_per_reg;
if (!gpio->ngpio_per_reg)
@@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
- goto err_free_gpio;
+ goto err_free_bitmap;
#ifdef CONFIG_REGMAP_IRQ
if (config->regmap_irq_chip) {
@@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
config->regmap_irq_line, config->regmap_irq_flags,
0, config->regmap_irq_chip, &gpio->irq_chip_data);
if (ret)
- goto err_free_gpio;
+ goto err_free_bitmap;
irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
} else
@@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
+err_free_bitmap:
+ bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
#endif
gpiochip_remove(&gpio->gpio_chip);
+ bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 09a448ce3eec..3c8fd322a713 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -50,25 +50,6 @@ static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
return ioread32(gpio->base + offs);
}
-static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
- u32 val)
-{
- iowrite32(val, gpio->base + offs);
-}
-
-static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
- u32 mask, u32 val)
-{
- u32 r;
-
- guard(gpio_generic_lock_irqsave)(&gpio->chip);
-
- r = tb10x_reg_read(gpio, offs);
- r = (r & ~mask) | (val & mask);
-
- tb10x_reg_write(gpio, offs, r);
-}
-
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 284e762d92c4..d441c1236d8c 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+ unsigned int acpi_debounce)
+{
+ int ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ acpi_debounce *= 10;
+ ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+ if (ret)
+ gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+ acpi_debounce, ret);
+}
+
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
- int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
+ acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
+ "Failed to request GPIO for pin 0x%04X, err %pe\n",
+ pin, desc);
return AE_OK;
}
@@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
- int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
- if (ret)
- return ERR_PTR(ret);
+ acpi_gpio_set_debounce_timeout(desc, info.debounce);
return desc;
}
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index f21dbc28cf2c..e3806db1c0e0 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -41,7 +41,7 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
!strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
return ERR_PTR(-ENOENT);
- gdev = gpio_device_find_by_label(gdev_node->name);
+ gdev = gpio_device_find_by_fwnode(fwnode);
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9952e412da50..cd8800ba5825 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -5296,6 +5296,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
struct gpio_device *gdev;
loff_t index = *pos;
+ s->private = NULL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
@@ -5329,7 +5331,11 @@ static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
- struct gpiolib_seq_priv *priv = s->private;
+ struct gpiolib_seq_priv *priv;
+
+ priv = s->private;
+ if (!priv)
+ return;
srcu_read_unlock(&gpio_devices_srcu, priv->idx);
kfree(priv);
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4b2f7d794275..da2565e6de71 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -245,7 +245,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a2ca9acf8c4e..923f0fa7350c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1267,6 +1267,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ /* The VM entity is stopped if the process was killed; don't clear freed PT BOs */
+ if (!amdgpu_vm_ready(vm))
+ return 0;
+
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index ef996493115f..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f5d5c45ddc0d..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3d032c4e2dce..2819aceaab74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5243,10 +5243,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- r = amdgpu_dpm_notify_rlc_state(adev, false);
- if (r)
- return r;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8561ad7f6180..ed3bef1edfe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -82,6 +82,18 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P due to device-local
+ * compression metadata; fall back to the system-memory path instead.
+ * This applies when the device supports GFX12 (GC 12.x or newer) and
+ * the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ attach->peer2peer = false;
+
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 61268aa82df4..7333e19291cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2632,9 +2632,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 9cddbf50442a..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size,
if (ret)
return ret;
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
ret = amdgpu_bo_create_kernel(adev,
size,
ISP_MC_ADDR_ALIGN,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8c0e5d03de50..aa7987d0806c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2355,8 +2355,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
- } else
+ } else {
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
+ }
mutex_lock(&psp->securedisplay_context.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 761bad98da3e..4d0096d0baa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -151,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
+ unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
- spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
@@ -174,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
list_del(&userq_fence->link);
dma_fence_put(fence);
}
- spin_unlock(&fence_drv->fence_list_lock);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 474bfe36c0c2..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
+
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 811124ff88a8..f9e2edf5260b 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return -EINVAL;
}
- if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+ !adev->in_suspend)
flags |= AMDGPU_XCP_OPS_KFD;
if (flags & AMDGPU_XCP_OPS_KFD) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
index 96616a865aac..ed1e25661706 100644
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7693b7953426..80565392313f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3102,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 5976ed55d9db..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4399,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
gfx_v7_0_gpu_early_init(adev);
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 0856ff65288c..8a81713d97aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2023,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 77f9d5b9a556..c90cbe053ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2292,7 +2292,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
} else {
- if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ if (adev->in_suspend)
+ amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+ else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE) ==
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
r = amdgpu_xcp_switch_partition_mode(
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index baf097d2e1ac..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 64b240b51f1a..a9be7a505026 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
return err;
}
-static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg1, sol_reg2;
+ int retry_loop;
+ /* Wait for the TOS to be unloaded */
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ usleep_range(1000, 2000);
+ sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg1 == sol_reg2)
+ return 0;
+ }
+ dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x\n",
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+
+ return -ETIME;
+}
+
+static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
+ /* For a reset done at the end of S3, only wait for TOS to be unloaded */
+ if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ return psp_v11_wait_for_tos_unload(psp);
+
for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a65c67cf56ff..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d72411c3379..74a1d3e1d52b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3687,6 +3687,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6597475e245d..91c0188a29b2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);
@@ -3563,6 +3563,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3572,7 +3573,14 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
@@ -8030,7 +8038,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
"mode %dx%d@%dHz is not native, enabling scaling\n",
adjusted_mode->hdisplay, adjusted_mode->vdisplay,
drm_mode_vrefresh(adjusted_mode));
- dm_new_connector_state->scaling = RMX_FULL;
+ dm_new_connector_state->scaling = RMX_ASPECT;
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 1ec9d03ad747..38f9ea313dcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f263e1a4537e..00dac862b665 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1302,7 +1302,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fe100e4c9801..cc21337a182f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 09be2a90cc79..4f569cd8a5d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7c276c319086..ce3d0b45fb4c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..62a39204fe0b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,7 +44,13 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 9e33bf937a69..2676ae9f6fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 85303167a553..1173c53359b0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -1141,6 +1141,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
else if (dc_is_dvi_signal(sink->sink_signal) &&
+ dc_is_dvi_signal(link->connector_signal) &&
aud_support->hdmi_audio_native &&
sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ce421bcddcb0..1aae46d703ba 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
+
+ /*
+ * If VRR is inactive, set vtotal min and max to nominal vtotal
+ */
+ if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(stream,
+ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
+ return;
+ }
}
unsigned long long mod_freesync_calc_nominal_field_rate(
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
index 086869264425..a252ee4c7874 100644
--- a/drivers/gpu/drm/amd/include/amd_cper.h
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 518d07afc7df..bc29a923fa6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (pp_funcs && pp_funcs->notify_rlc_state) {
- mutex_lock(&adev->pm.mutex);
-
- ret = pp_funcs->notify_rlc_state(
- adev->powerplay.pp_handle,
- en);
-
- mutex_unlock(&adev->pm.mutex);
- }
-
- return ret;
-}
-
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b5fbb0fd1dc0..a7e6d7854b7b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -4724,14 +4724,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = devm_device_add_group(adev->dev,
&amdgpu_pm_policy_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
}
if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
ret = devm_device_add_group(adev->dev,
&amdgpu_board_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
(void *)&tmp) != -EOPNOTSUPP) {
sysfs_add_file_to_group(&adev->dev->kobj,
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65c1d98af26c..af48aead12f7 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -424,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index d2dbd90bb427..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 1f50f1e74c48..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fb8086859857..244b8c364d45 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2040,6 +2040,12 @@ static int smu_disable_dpms(struct smu_context *smu)
smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
return 0;
+ /* vangogh s0ix */
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+ adev->in_s0ix)
+ return 0;
+
/*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2c9869feba61..0708d0f0938b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -2217,6 +2217,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+ if (adev->in_s0ix)
+ return 0;
+
/* allow message will be sent after enable message on Vangogh*/
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f532f7c69259..a8961a8f5c42 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index c15aef014f69..d41bd876167c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}
-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
- u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
- tmp |= val;
- __ast_write8_i(addr, reg, index, tmp);
+ val &= ~preserve_mask;
+ __ast_write8_i(addr, reg, index, tmp | val);
}
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index d502d146b177..56638814bb28 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -280,7 +280,7 @@ sanity:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
- |
set -eu
image_tags=(
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index 72480db1f00d..515aceac22b1 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -13,8 +13,8 @@
static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
MODULE_PARM_DESC(active,
- "Choose which drm client to start, default is"
- CONFIG_DRM_CLIENT_DEFAULT "]");
+ "Choose which drm client to start, default is "
+ CONFIG_DRM_CLIENT_DEFAULT);
/**
* drm_client_setup() - Setup in-kernel DRM clients
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index ebf305fb24f0..6fb55601252f 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
- drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ if (shadow_plane_state) {
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ } else {
+ __drm_atomic_helper_plane_reset(plane, NULL);
+ }
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 1d6312fa1429..d4b6ea42db0f 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+{
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+}
+
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- if (vaddr)
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- drm_panic_write_pixel(vaddr, offset, color, cpp);
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
+
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index b13a17276d07..88385dc3b30d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
- unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e58c0c158b3a..b91575380708 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -413,7 +413,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 77a0199f9ea5..4a4cace1f879 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
+static bool fixup_dmc_evt(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg_ctl, u32 *data_ctl,
+ i915_reg_t reg_htp, u32 *data_htp)
+{
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+ return false;
+
+ if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+ return false;
+
+ /* make sure reg_ctl and reg_htp are for the same event */
+ if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+ i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+ return false;
+
+ /*
+ * On ADL-S the HRR event handler is not restored after DC6.
+ * Clear it to zero from the beginning to avoid mismatches later.
+ */
+ if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl = 0;
+ *data_htp = 0;
+ return true;
+ }
+
+ return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+ }
+
+ for (i = 0; i < mmio_count - 1; i++) {
+ u32 orig_mmiodata[2] = {
+ dmc_info->mmiodata[i],
+ dmc_info->mmiodata[i+1],
+ };
+
+ if (!fixup_dmc_evt(display, dmc_id,
+ dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+ dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+ continue;
+
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+ orig_mmiodata[0], dmc_info->mmiodata[i]);
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+ i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+ orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+ }
+ for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
- i, mmioaddr[i], mmiodata[i],
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b817ff44c043..c48384e58ea1 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
-
- intel_fb->panic = panic;
-
return intel_fb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 10eb93a34cf2..4619237f1346 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -585,6 +585,10 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
int ret;
+ /* TODO: Enable Panel Replay on MST once it's properly implemented. */
+ if (intel_dp->mst_detect == DRM_DP_MST)
+ return;
+
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
&intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
if (ret < 0)
@@ -888,7 +892,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
u32 current_dc_state = intel_display_power_get_current_dc_state(display);
- struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
current_dc_state != DC_STATE_EN_UPTO_DC6) ||
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 88b147fa5cb1..c90b35881a26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -205,7 +205,7 @@ static u64 div_u64_roundup(u64 nom, u32 den)
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
+ return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency);
}
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
@@ -215,7 +215,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
+ return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC);
}
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 25e97031d76e..30d5889fc809 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1595,8 +1595,20 @@ err_unlock:
err_vma_res:
i915_vma_resource_free(vma_res);
err_fence:
- if (work)
- dma_fence_work_commit_imm(&work->base);
+ if (work) {
+ /*
+ * When pinning VMA to GGTT on CHV or BXT with VTD enabled,
+ * commit VMA binding asynchronously to avoid risk of lock
+ * inversion among reservation_ww locks held here and
+ * cpu_hotplug_lock acquired from stop_machine(), which we
+ * wrap around GGTT updates when running in those environments.
+ */
+ if (i915_vma_is_ggtt(vma) &&
+ intel_vm_no_concurrent_access_wa(vma->vm->i915))
+ dma_fence_work_commit(&work->base);
+ else
+ dma_fence_work_commit_imm(&work->base);
+ }
err_rpm:
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
index 682dd2633d0c..0482bfcefdde 100644
--- a/drivers/gpu/drm/imagination/Kconfig
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -7,6 +7,7 @@ config DRM_POWERVR
depends on DRM
depends on MMU
depends on PM
+ depends on POWER_SEQUENCING || !POWER_SEQUENCING
select DRM_EXEC
select DRM_GEM_SHMEM_HELPER
select DRM_SCHED
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 6d8325c76697..7fc6af703307 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -25,19 +25,18 @@
struct imx_parallel_display_encoder {
struct drm_encoder encoder;
- struct drm_bridge bridge;
- struct imx_parallel_display *pd;
};
struct imx_parallel_display {
struct device *dev;
u32 bus_format;
struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
};
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
+ return container_of(b, struct imx_parallel_display, bridge);
}
static const u32 imx_pd_bus_fmts[] = {
@@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(imxpd_encoder))
return PTR_ERR(imxpd_encoder);
- imxpd_encoder->pd = imxpd;
encoder = &imxpd_encoder->encoder;
- bridge = &imxpd_encoder->bridge;
+ bridge = &imxpd->bridge;
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
- bridge->funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
connector = drm_bridge_connector_init(drm, encoder);
@@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
u32 bus_format = 0;
const char *fmt;
- imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
+ imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+ &imx_pd_bridge_funcs);
+ if (IS_ERR(imxpd))
+ return PTR_ERR(imxpd);
/* port@1 is the output port */
imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxpd);
+ devm_drm_bridge_add(dev, &imxpd->bridge);
+
return component_add(dev, &imx_pd_ops);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index bc7527542fdc..c4c6d0249df5 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -283,6 +283,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
unsigned int i;
unsigned long flags;
+ /* release GCE HW usage and start autosuspend */
+ pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
+ pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);
+
if (data->sta < 0)
return;
@@ -618,6 +622,9 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
+ goto update_config_out;
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
goto update_config_out;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index eb5537f0ac90..31ff2922758a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -686,10 +686,6 @@ err_free:
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
err_put_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
return ret;
}
@@ -697,18 +693,12 @@ err_put_dev:
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
- int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
-
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 02349bd44001..788b52c1d10c 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -21,9 +21,6 @@
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_INVALID,
};
@@ -71,26 +68,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
- if (modifier == DRM_FORMAT_MOD_LINEAR)
- return true;
-
- if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
- AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE))
- return false;
-
- if (format != DRM_FORMAT_XRGB8888 &&
- format != DRM_FORMAT_ARGB8888 &&
- format != DRM_FORMAT_BGRX8888 &&
- format != DRM_FORMAT_BGRA8888 &&
- format != DRM_FORMAT_ABGR8888 &&
- format != DRM_FORMAT_XBGR8888 &&
- format != DRM_FORMAT_RGB888 &&
- format != DRM_FORMAT_BGR888)
- return false;
-
- return true;
+ return modifier == DRM_FORMAT_MOD_LINEAR;
}
static void mtk_plane_destroy_state(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index fc62fef2fed8..4e6dc16e4a4c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
return true;
}
+#define NEXT_BLK(blk) \
+ ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
- blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ blk = NEXT_BLK(blk)) {
if (blk->size == 0)
continue;
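The NEXT_BLK() helper above steps through the firmware image by the full header-plus-payload size in bytes. Below is a minimal standalone sketch of that walk; the two-field block layout and the sample image are hypothetical, not the GMU firmware's actual format.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical block layout: a fixed header followed by 'size' payload
 * bytes. This is only an illustration, not the real firmware format. */
struct block_header {
	uint32_t addr;
	uint32_t size;		/* payload bytes, header not included */
	uint8_t data[];
};

/* Stride to the next block: the header itself plus its payload. */
#define NEXT_BLK(blk) \
	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

int main(void)
{
	uint64_t storage[8] = { 0 };	/* keep the buffer aligned for the header */
	uint8_t *image = (uint8_t *)storage;
	uint32_t sizes[] = { 8, 4 };
	size_t off = 0;

	/* Build a fake image containing two blocks. */
	for (int i = 0; i < 2; i++) {
		struct block_header hdr = { .addr = 0x1000u * (i + 1), .size = sizes[i] };

		memcpy(image + off, &hdr, sizeof(hdr));
		off += sizeof(hdr) + sizes[i];
	}

	/* Walk the image block by block, as the loop above does. */
	for (const struct block_header *blk = (const struct block_header *)image;
	     (const uint8_t *)blk < image + off;
	     blk = NEXT_BLK(blk))
		printf("block addr=%#" PRIx32 " payload=%" PRIu32 " bytes\n",
		       blk->addr, blk->size);

	return 0;
}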
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index afaa3cfefd35..4b5a4edd0702 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
return 0;
}
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
- struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
- return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4b970a59deaf..2f8156051d9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
dpu_kms->perf.perf_cfg);
+ if (dpu_kms->catalog->caps->has_3d_merge)
+ adjusted_mode_clk /= 2;
+
/*
* The given mode, adjusted for the perf clock factor, should not exceed
* the max core clock rate
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 6641455c4ec6..9f8d1bba9139 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x200, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.base = 0x320, .len = 0x100,}, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f54cf0faa1c7..905524ceeb1f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
int i;
for (i = 0; i < DPU_MAX_PLANES; i++) {
+ uint32_t w = src_w, h = src_h;
+
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
- src_w /= chroma_subsmpl_h;
- src_h /= chroma_subsmpl_v;
+ w /= chroma_subsmpl_h;
+ h /= chroma_subsmpl_v;
}
- pixel_ext->num_ext_pxls_top[i] = src_h;
- pixel_ext->num_ext_pxls_left[i] = src_w;
+ pixel_ext->num_ext_pxls_top[i] = h;
+ pixel_ext->num_ext_pxls_left[i] = w;
}
}
@@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
* We already have verified scaling against platform limitations.
* Now check if the SSPP supports scaling at all.
*/
- if (!sblk->scaler_blk.len &&
+ if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
((drm_rect_width(&new_plane_state->src) >> 16 !=
drm_rect_width(&new_plane_state->dst)) ||
(drm_rect_height(&new_plane_state->src) >> 16 !=
@@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
state, plane_state,
prev_adjacent_plane_state);
if (ret)
- break;
+ return ret;
prev_adjacent_plane_state = plane_state;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 2c77c74fac0f..d9c3b0a1d091 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
if (!reqs->scale && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
- if (!hw_sspp && reqs->scale)
+ if (!hw_sspp && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
if (!hw_sspp)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index cd73468e369a..7545c0293efb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, dpu_wb_conn->maxlinewidth);
return -EINVAL;
+ } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+ return -EINVAL;
}
return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index e391505fdaf0..3cbf08231492 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -109,7 +109,6 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
- void *pll_data;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 32f06edd21a9..c5e1d2016bcc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
u32 data;
spin_lock_irqsave(&pll->pll_enable_lock, flags);
- if (pll->pll_enable_cnt++) {
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
- WARN_ON(pll->pll_enable_cnt == INT_MAX);
- return;
- }
+ pll->pll_enable_cnt++;
+ WARN_ON(pll->pll_enable_cnt == INT_MAX);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
@@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
- phy->pll_data = pll_7nm;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
@@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
- struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
- unsigned long flags;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
- spin_lock_irqsave(&pll->pll_enable_lock, flags);
- pll->pll_enable_cnt = 1;
/* de-assert digital and pll power down */
data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
- struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
- unsigned long flags;
u32 data;
DBG("");
@@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
- spin_lock_irqsave(&pll->pll_enable_lock, flags);
- pll->pll_enable_cnt = 0;
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* make sure phy is turned off */
wmb();
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 07d8cdd6bb2e..9f7fbe577abb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (obj->resv != &obj->_resv) {
+ /*
+ * In error paths, we could end up here before msm_gem_new_handle()
+ * has changed obj->resv to point to the shared resv. In this case,
+ * we don't want to drop a ref to the shared r_obj that we haven't
+ * taken yet.
+ */
+ if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
- WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
-
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3ab3b27134f9..75d9f3574370 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
submit->user_fence,
DMA_RESV_USAGE_BOOKKEEP,
DMA_RESV_USAGE_BOOKKEEP);
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
+
return;
}
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
-
- last_fence = vm->last_fence;
- vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
- dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 8316af1723c2..89a95977f41e 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -971,6 +971,7 @@ static int
lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
{
struct drm_device *dev = job->vm->drm;
+ struct msm_drm_private *priv = dev->dev_private;
int i = job->nr_ops++;
int ret = 0;
@@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
break;
}
+ if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+ !adreno_smmu_has_prr(priv->gpu)) {
+ ret = UERR(EINVAL, dev, "PRR not supported\n");
+ }
+
return ret;
}
@@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
* Maybe we could allow just UNMAP ops? OTOH userspace should just
* immediately close the device file and all will be torn down.
*/
- if (to_msm_vm(ctx->vm)->unusable)
+ if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
return UERR(EPIPE, dev, "context is unusable");
/*
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index a597f2bee30b..2894fc118485 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+ if (!adreno_smmu)
+ return false;
+
+	return adreno_smmu->set_prr_addr;
+}
+
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 0e18619f96cb..a188617653e8 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall
ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
if (ret != p->count) {
+ kfree(p->pages);
+ p->pages = NULL;
p->count = ret;
return -ENOMEM;
}
@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
struct kmem_cache *pt_cache = get_pt_cache(mmu);
uint32_t remaining_pt_count = p->count - p->ptr;
+ if (!p->pages)
+ return;
+
if (p->count > 0)
trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e97e39abf3a2..12b1dba8e05d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2867,7 +2867,9 @@ nv50_display_create(struct drm_device *dev)
}
/* Assign the correct format modifiers */
- if (disp->disp->object.oclass >= TU102_DISP)
+ if (disp->disp->object.oclass >= GB202_DISP)
+ nouveau_display(dev)->format_modifiers = wndwca7e_modifiers;
+ else if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 15f9242b72ac..5d998f0319dc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -104,4 +104,5 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
+extern const u64 wndwca7e_modifiers[];
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index e2c55f4b9c5a..ef9e410babbf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -786,13 +786,14 @@ nv50_wndw_destroy(struct drm_plane *plane)
}
/* This function assumes the format has already been validated against the plane
- * and the modifier was validated against the device-wides modifier list at FB
+ * and the modifier was validated against the device-wide modifier list at FB
* creation time.
*/
static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ const struct drm_format_info *info = drm_format_info(format);
uint8_t i;
/* All chipsets can display all formats in linear layout */
@@ -800,13 +801,32 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
return true;
if (drm->client.device.info.chipset < 0xc0) {
- const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
if (!format) return false;
for (i = 0; i < info->num_planes; i++)
if ((info->cpp[i] != 4) && kind != 0x70) return false;
+ } else if (drm->client.device.info.chipset >= 0x1b2) {
+ const uint8_t slayout = ((modifier >> 22) & 0x1) |
+ ((modifier >> 25) & 0x6);
+
+ if (!format)
+ return false;
+
+ /*
+ * Note in practice this implies only formats where cpp is equal
+ * for each plane, or >= 4 for all planes, are supported.
+ */
+ for (i = 0; i < info->num_planes; i++) {
+ if (((info->cpp[i] == 2) && slayout != 3) ||
+ ((info->cpp[i] == 1) && slayout != 2) ||
+ ((info->cpp[i] >= 4) && slayout != 1))
+ return false;
+
+ /* 24-bit not supported. It has yet another layout */
+ WARN_ON(info->cpp[i] == 3);
+ }
}
return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
index 0d8e9a9d1a57..2cec8cfbd546 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -179,6 +179,39 @@ wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwca7e_modifiers[] = { /* | | | | | */
+ /* 4cpp+ modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ /* 1cpp/8bpp modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 5),
+ /* 2cpp/16bpp modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 5),
+ /* All formats support linear */
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const struct nv50_wndw_func
wndwca7e = {
.acquire = wndwc37e_acquire,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index e60f7892f5ce..a7bf539e5d86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
+static bool
+nouveau_sched_job_list_empty(struct nouveau_sched *sched)
+{
+ bool empty;
+
+ spin_lock(&sched->job.list.lock);
+ empty = list_empty(&sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
+ return empty;
+}
static void
nouveau_sched_fini(struct nouveau_sched *sched)
@@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+ wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 2fc7b0779b37..893af9b16756 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
&kingdisplay_panel_funcs,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 04d91929eedd..d5f821d6b23c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
+/*
+ * The mode data for this panel has been reverse engineered without access
+ * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
+ * other panels results in garbage data on the display.
+ */
static const struct drm_display_mode t28cp45tn89_mode = {
.clock = 6008,
.hdisplay = 240,
@@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
.vtotal = 320 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 57,
- .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct drm_display_mode et028013dma_mode = {
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 156c7a0b62a2..3f43686f0195 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -288,6 +288,23 @@ panthor_gem_create_with_handle(struct drm_file *file,
panthor_gem_debugfs_set_usage_flags(bo, 0);
+ /* If this is a write-combine mapping, we query the sgt to force a CPU
+ * cache flush (dma_map_sgtable() is called when the sgt is created).
+	 * This ensures the zeroing is visible to any uncached mapping created
+ * by vmap/mmap.
+ * FIXME: Ideally this should be done when pages are allocated, not at
+ * BO creation time.
+ */
+ if (shmem->map_wc) {
+ struct sg_table *sgt;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto out_put_gem;
+ }
+ }
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -296,6 +313,7 @@ panthor_gem_create_with_handle(struct drm_file *file,
if (!ret)
*size = bo->base.base.size;
+out_put_gem:
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6dec4354e378..7870e7dbaa5d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
- /* Partial unmaps might trigger a remap with either a prev or a next VA,
- * but not both.
+ /* Two VMAs can be needed for an unmap, as an unmap can happen
+ * in the middle of a drm_gpuva, requiring a remap with both
+ * prev & next VA. Or an unmap can span more than one drm_gpuva
+	 * where the first and last ones are covered partially, requiring
+	 * a remap for the first with a prev VA and a remap for the last
+ * with a next VA.
*/
- vma_count = 1;
+ vma_count = 2;
break;
default:
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e821d67af7..9c8907bc61d9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
+ return ret;
pci_set_drvdata(pdev, ddev);
ret = radeon_driver_load_kms(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
ret = drm_dev_register(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
-err_agp:
+err:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(ddev);
return ret;
}
static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- /* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown
- */
- if (radeon_device_is_virtual())
- radeon_pci_remove(pdev);
-
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
@@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
.shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 645e33bf7947..ba1446acd703 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
rdev->agp = NULL;
done_free:
- kfree(rdev);
dev->dev_private = NULL;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7b613997bb50..727cdf768161 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
- FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 5a4697f636f2..fe174a4857be 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ entity->last_user = current->group_leader;
/*
* It's perfectly valid to initialize an entity without having a valid
* scheduler attached. It's just not valid to use the scheduler before it
@@ -172,26 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_error);
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb);
+
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
-
- drm_sched_fence_scheduled(job->s_fence, NULL);
- drm_sched_fence_finished(job->s_fence, -ESRCH);
- WARN_ON(job->s_fence->parent);
- job->sched->ops->free_job(job);
-}
-
-/* Signal the scheduler finished fence when the entity in question is killed. */
-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
- finish_cb);
+ struct dma_fence *f;
unsigned long index;
- dma_fence_put(f);
-
/* Wait for all dependencies to avoid data corruptions */
xa_for_each(&job->dependencies, index, f) {
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
@@ -219,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
dma_fence_put(f);
}
+ drm_sched_fence_scheduled(job->s_fence, NULL);
+ drm_sched_fence_finished(job->s_fence, -ESRCH);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+
+ dma_fence_put(f);
+
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
schedule_work(&job->work);
}
@@ -302,7 +307,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
- if ((!last_user || last_user == current->group_leader) &&
+ if (last_user == current->group_leader &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
drm_sched_entity_kill(entity);
@@ -552,10 +557,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
+
+ spin_unlock(&entity->lock);
}
/**
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 7d9e85e932d7..f0e72d4b6a47 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -85,6 +85,7 @@ config DRM_PANEL_MIPI_DBI
config DRM_PIXPAPER
tristate "DRM support for PIXPAPER display panels"
depends on DRM && SPI
+ depends on MMU
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
index 718832b08d96..c46f17ba7236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -100,8 +100,10 @@ vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
if (vmw->has_mob) {
if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
return VMW_CURSOR_UPDATE_MOB;
+ else
+ return VMW_CURSOR_UPDATE_GB_ONLY;
}
-
+ drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n");
return VMW_CURSOR_UPDATE_NONE;
}
@@ -139,6 +141,7 @@ static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
{
switch (update_type) {
case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_NONE:
return 0;
case VMW_CURSOR_UPDATE_MOB:
@@ -623,6 +626,7 @@ int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
if (!surface || vps->cursor.legacy.id == surface->snooper.id)
vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_MOB: {
bo = vmw_user_object_buffer(&vps->uo);
if (bo) {
@@ -737,6 +741,7 @@ void
vmw_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vmw_bo *bo;
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_state =
@@ -762,6 +767,15 @@ vmw_cursor_plane_atomic_update(struct drm_plane *plane,
case VMW_CURSOR_UPDATE_MOB:
vmw_cursor_update_mob(dev_priv, vps);
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo)
+ vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
+ vps->base.crtc_w,
+ vps->base.crtc_h,
+ vps->base.hotspot_x,
+ vps->base.hotspot_y);
+ break;
case VMW_CURSOR_UPDATE_NONE:
/* do nothing */
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
index 40694925a70e..0c2cc0699b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -33,6 +33,7 @@ static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
enum vmw_cursor_update_type {
VMW_CURSOR_UPDATE_NONE = 0,
VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_GB_ONLY,
VMW_CURSOR_UPDATE_MOB,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d539f25b5fbe..3057f8baa7d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3668,6 +3668,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id = header->id;
+ if (header->size > SVGA_CMD_MAX_DATASIZE) {
+ VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
+			       cmd_id);
+ return -E2BIG;
+ }
*size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 7de20e56082c..fd4e76486f2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -32,22 +32,22 @@ enum vmw_bo_dirty_method {
/**
* struct vmw_bo_dirty - Dirty information for buffer objects
+ * @ref_count: Reference count for this structure. Must be first member!
* @start: First currently dirty bit
* @end: Last currently dirty bit + 1
* @method: The currently used dirty method
* @change_count: Number of consecutive method change triggers
- * @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
struct vmw_bo_dirty {
+ struct kref ref_count;
unsigned long start;
unsigned long end;
enum vmw_bo_dirty_method method;
unsigned int change_count;
- unsigned int ref_count;
unsigned long bitmap_size;
unsigned long bitmap[];
};
@@ -221,7 +221,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
int ret;
if (dirty) {
- dirty->ref_count++;
+ kref_get(&dirty->ref_count);
return 0;
}
@@ -235,7 +235,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
- dirty->ref_count = 1;
+ kref_init(&dirty->ref_count);
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
@@ -274,10 +274,8 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- if (dirty && --dirty->ref_count == 0) {
- kvfree(dirty);
+ if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
vbo->dirty = NULL;
- }
}
/**
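The vmw_bo_dirty conversion above moves from an open-coded counter to struct kref and passes kvfree through a function-pointer cast as the release callback, which only works because ref_count is the first member (hence the "Must be first member!" note). For comparison, here is a minimal sketch of the more conventional pattern with a dedicated release callback and hypothetical type names; with container_of() the kref can live anywhere in the structure.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical refcounted object; with a real release callback the kref
 * does not have to be the first member. */
struct demo_dirty {
	unsigned long start;
	unsigned long end;
	struct kref ref_count;
};

/* Release callback: recover the containing object and free it. */
static void demo_dirty_release(struct kref *kref)
{
	struct demo_dirty *dirty = container_of(kref, struct demo_dirty, ref_count);

	kvfree(dirty);
}

static struct demo_dirty *demo_dirty_create(void)
{
	struct demo_dirty *dirty = kvzalloc(sizeof(*dirty), GFP_KERNEL);

	if (dirty)
		kref_init(&dirty->ref_count);	/* refcount starts at 1 */
	return dirty;
}

static void demo_dirty_get(struct demo_dirty *dirty)
{
	kref_get(&dirty->ref_count);
}

/* Returns nonzero when the last reference was dropped and the object freed. */
static int demo_dirty_put(struct demo_dirty *dirty)
{
	return kref_put(&dirty->ref_count, demo_dirty_release);
}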
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 51f2a03847f9..f680c8b8f258 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -168,6 +168,7 @@
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define FAST_CLEAR_VALIGN_FIX REG_BIT(13)
#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 34d33965eac2..456899238377 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -988,16 +988,16 @@ void xe_device_shutdown(struct xe_device *xe)
drm_dbg(&xe->drm, "Shutting down device\n");
- if (xe_driver_flr_disabled(xe)) {
- xe_display_pm_shutdown(xe);
+ xe_display_pm_shutdown(xe);
- xe_irq_suspend(xe);
+ xe_irq_suspend(xe);
- for_each_gt(gt, xe, id)
- xe_gt_shutdown(gt);
+ for_each_gt(gt, xe, id)
+ xe_gt_shutdown(gt);
- xe_display_pm_shutdown_late(xe);
- } else {
+ xe_display_pm_shutdown_late(xe);
+
+ if (!xe_driver_flr_disabled(xe)) {
/* BOOM! */
__xe_driver_flr(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 7715e74bb945..a8ab363a8046 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -165,7 +165,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
- &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
+ &syncs_user[num_syncs], NULL, 0,
+ SYNC_PARSE_FLAG_EXEC |
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0));
if (err)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 37b2b93b73d6..cb5f204c08ed 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -10,6 +10,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>
#include "xe_dep_scheduler.h"
@@ -324,6 +325,16 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
}
xe_vm_put(migrate_vm);
+ if (!IS_ERR(q)) {
+ int err = drm_syncobj_create(&q->ufence_syncobj,
+ DRM_SYNCOBJ_CREATE_SIGNALED,
+ NULL);
+ if (err) {
+ xe_exec_queue_put(q);
+ return ERR_PTR(err);
+ }
+ }
+
return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
@@ -333,6 +344,9 @@ void xe_exec_queue_destroy(struct kref *ref)
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
struct xe_exec_queue *eq, *next;
+ if (q->ufence_syncobj)
+ drm_syncobj_put(q->ufence_syncobj);
+
if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 27b76cf9da89..df1c69dc81f1 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -15,6 +15,7 @@
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"
+struct drm_syncobj;
struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
@@ -155,6 +156,12 @@ struct xe_exec_queue {
struct list_head link;
} pxp;
+ /** @ufence_syncobj: User fence syncobj */
+ struct drm_syncobj *ufence_syncobj;
+
+ /** @ufence_timeline_value: User fence timeline value */
+ u64 ufence_timeline_value;
+
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7fdd0a97a628..5edc0cad47e2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
+
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3e0ad7e5b5df..6d3db5e55d98 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)
unsigned int fw_ref;
int err;
- if (xe_device_wedged(gt_to_xe(gt)))
- return -ECANCELED;
+ if (xe_device_wedged(gt_to_xe(gt))) {
+ err = -ECANCELED;
+ goto err_pm_put;
+ }
/* We only support GT resets with GuC submission */
- if (!xe_device_uc_enabled(gt_to_xe(gt)))
- return -ENODEV;
+ if (!xe_device_uc_enabled(gt_to_xe(gt))) {
+ err = -ENODEV;
+ goto err_pm_put;
+ }
xe_gt_info(gt, "reset started\n");
@@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)
if (!err)
xe_gt_warn(gt, "reset block failed to get lifted");
- xe_pm_runtime_get(gt_to_xe(gt));
-
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
@@ -874,6 +876,7 @@ err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
xe_pm_runtime_put(gt_to_xe(gt));
return err;
@@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)
return;
xe_gt_info(gt, "reset queued\n");
- queue_work(gt->ordered_wq, &gt->reset.worker);
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+ if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+ xe_pm_runtime_put(gt_to_xe(gt));
}
void xe_gt_suspend_prepare(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 18f6327bf552..283d846c3512 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -200,6 +200,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ cancel_work_sync(&ct->dead.worker);
+#endif
ct_exit_safe_mode(ct);
destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index a4894eb0d7f3..125698a9ecf1 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -10,6 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -1389,7 +1390,9 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
return 0;
}
-static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+static int xe_oa_parse_syncs(struct xe_oa *oa,
+ struct xe_oa_stream *stream,
+ struct xe_oa_open_param *param)
{
int ret, num_syncs, num_ufence = 0;
@@ -1409,7 +1412,9 @@ static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
- &param->syncs_user[num_syncs], 0);
+ &param->syncs_user[num_syncs],
+ stream->ufence_syncobj,
+ ++stream->ufence_timeline_value, 0);
if (ret)
goto err_syncs;
@@ -1539,7 +1544,7 @@ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
return -ENODEV;
param.xef = stream->xef;
- err = xe_oa_parse_syncs(stream->oa, &param);
+ err = xe_oa_parse_syncs(stream->oa, stream, &param);
if (err)
goto err_config_put;
@@ -1635,6 +1640,7 @@ static void xe_oa_destroy_locked(struct xe_oa_stream *stream)
if (stream->exec_q)
xe_exec_queue_put(stream->exec_q);
+ drm_syncobj_put(stream->ufence_syncobj);
kfree(stream);
}
@@ -1826,6 +1832,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
struct xe_oa_open_param *param)
{
struct xe_oa_stream *stream;
+ struct drm_syncobj *ufence_syncobj;
int stream_fd;
int ret;
@@ -1836,17 +1843,31 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
goto exit;
}
+ ret = drm_syncobj_create(&ufence_syncobj, DRM_SYNCOBJ_CREATE_SIGNALED,
+ NULL);
+ if (ret)
+ goto exit;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream) {
ret = -ENOMEM;
- goto exit;
+ goto err_syncobj;
}
-
+ stream->ufence_syncobj = ufence_syncobj;
stream->oa = oa;
- ret = xe_oa_stream_init(stream, param);
+
+ ret = xe_oa_parse_syncs(oa, stream, param);
if (ret)
goto err_free;
+ ret = xe_oa_stream_init(stream, param);
+ if (ret) {
+ while (param->num_syncs--)
+ xe_sync_entry_cleanup(&param->syncs[param->num_syncs]);
+ kfree(param->syncs);
+ goto err_free;
+ }
+
if (!param->disabled) {
ret = xe_oa_enable_locked(stream);
if (ret)
@@ -1870,6 +1891,8 @@ err_destroy:
xe_oa_stream_destroy(stream);
err_free:
kfree(stream);
+err_syncobj:
+ drm_syncobj_put(ufence_syncobj);
exit:
return ret;
}
@@ -2083,22 +2106,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- ret = xe_oa_parse_syncs(oa, &param);
- if (ret)
- goto err_exec_q;
-
mutex_lock(&param.hwe->gt->oa.gt_lock);
ret = xe_oa_stream_open_ioctl_locked(oa, &param);
mutex_unlock(&param.hwe->gt->oa.gt_lock);
if (ret < 0)
- goto err_sync_cleanup;
+ goto err_exec_q;
return ret;
-err_sync_cleanup:
- while (param.num_syncs--)
- xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
- kfree(param.syncs);
err_exec_q:
if (param.exec_q)
xe_exec_queue_put(param.exec_q);
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 2628f78c4e8d..daf701b5d48b 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,6 +15,8 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
+struct drm_syncobj;
+
#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
@@ -248,6 +250,12 @@ struct xe_oa_stream {
/** @xef: xe_file with which the stream was opened */
struct xe_file *xef;
+ /** @ufence_syncobj: User fence syncobj */
+ struct drm_syncobj *ufence_syncobj;
+
+ /** @ufence_timeline_value: User fence timeline value */
+ u64 ufence_timeline_value;
+
/** @last_fence: fence to use in stream destroy when needed */
struct dma_fence *last_fence;
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index a1c88f9a6c76..07f96bda638a 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index da2a412f80c0..129e7818565c 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (!vma)
return -EINVAL;
+ if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+ drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+ return 0;
+ }
+
if (xe_vma_has_default_mem_attrs(vma))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 82872a51f098..d48ab7b32ca5 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -113,6 +113,8 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
+ struct drm_syncobj *ufence_syncobj,
+ u64 ufence_timeline_value,
unsigned int flags)
{
struct drm_xe_sync sync_in;
@@ -192,10 +194,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
if (exec) {
sync->addr = sync_in.addr;
} else {
+ sync->ufence_timeline_value = ufence_timeline_value;
sync->ufence = user_fence_create(xe, sync_in.addr,
sync_in.timeline_value);
if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
return PTR_ERR(sync->ufence);
+ sync->ufence_chain_fence = dma_fence_chain_alloc();
+ if (!sync->ufence_chain_fence)
+ return -ENOMEM;
+ sync->ufence_syncobj = ufence_syncobj;
}
break;
@@ -239,7 +246,12 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
} else if (sync->ufence) {
int err;
- dma_fence_get(fence);
+ drm_syncobj_add_point(sync->ufence_syncobj,
+ sync->ufence_chain_fence,
+ fence, sync->ufence_timeline_value);
+ sync->ufence_chain_fence = NULL;
+
+ fence = drm_syncobj_fence_get(sync->ufence_syncobj);
user_fence_get(sync->ufence);
err = dma_fence_add_callback(fence, &sync->ufence->cb,
user_fence_cb);
@@ -259,7 +271,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
drm_syncobj_put(sync->syncobj);
dma_fence_put(sync->fence);
dma_fence_chain_free(sync->chain_fence);
- if (sync->ufence)
+ dma_fence_chain_free(sync->ufence_chain_fence);
+ if (!IS_ERR_OR_NULL(sync->ufence))
user_fence_put(sync->ufence);
}
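The xe_sync changes above allocate the dma_fence_chain while parsing the sync arguments and only publish the fence on the per-queue syncobj timeline once it becomes available, so the signalling path has no allocation that could fail. Below is a minimal sketch of that two-step pattern with hypothetical function names; it assumes the caller owns the syncobj and hands in a monotonically increasing timeline value.

#include <drm/drm_syncobj.h>
#include <linux/dma-fence-chain.h>

/* Step 1, at parse time: pre-allocate the chain node so that publishing
 * the fence later cannot fail with -ENOMEM. */
static int demo_ufence_prepare(struct dma_fence_chain **chain)
{
	*chain = dma_fence_chain_alloc();
	return *chain ? 0 : -ENOMEM;
}

/* Step 2, once the job fence exists: add it as timeline point @value on
 * @syncobj, consuming the pre-allocated @chain, and return the resulting
 * timeline fence for the caller to wait on. */
static struct dma_fence *demo_ufence_publish(struct drm_syncobj *syncobj,
					     struct dma_fence_chain *chain,
					     struct dma_fence *fence, u64 value)
{
	drm_syncobj_add_point(syncobj, chain, fence, value);

	/* The chain node is now owned by the syncobj timeline. */
	return drm_syncobj_fence_get(syncobj);
}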
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 256ffc1e54dc..51f2d803e977 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -8,6 +8,7 @@
#include "xe_sync_types.h"
+struct drm_syncobj;
struct xe_device;
struct xe_exec_queue;
struct xe_file;
@@ -21,6 +22,8 @@ struct xe_vm;
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
+ struct drm_syncobj *ufence_syncobj,
+ u64 ufence_timeline_value,
unsigned int flags);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 30ac3f51993b..b88f1833e28c 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,9 +18,12 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
+ struct dma_fence_chain *ufence_chain_fence;
+ struct drm_syncobj *ufence_syncobj;
struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
+ u64 ufence_timeline_value;
u32 type;
u32 flags;
};
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
index fec331d791e7..b2d09c596714 100644
--- a/drivers/gpu/drm/xe/xe_validation.h
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)
*/
DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
if (_T) xe_validation_ctx_fini(_T);,
- ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
- _ret ? NULL : _ctx; }),
+ ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+ *_ret ? NULL : _ctx; }),
struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
- struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+ struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
{return *_T; }
#define class_xe_validation_is_conditional true
@@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
* exhaustive eviction.
*/
#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
- scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
+ scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \
drm_exec_until_all_locked(_exec)
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f602b874e054..ccb09ef4ec9e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
+#define XE_VMA_CREATE_MASK ( \
+ XE_VMA_READ_ONLY | \
+ XE_VMA_DUMPABLE | \
+ XE_VMA_SYSTEM_ALLOCATOR | \
+ DRM_GPUVA_SPARSE | \
+ XE_VMA_MADV_AUTORESET)
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
- op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
- op->map.is_null = xe_vma_is_null(vma);
+ op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
-
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
- bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
- bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
- bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
- bool is_cpu_addr_mirror =
- (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+ bool is_null = (flags & DRM_GPUVA_SPARSE);
+ bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
- if (is_cpu_addr_mirror)
- vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
- if (read_only)
- vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (dumpable)
- vma->gpuva.flags |= XE_VMA_DUMPABLE;
+ vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- op->map.is_cpu_addr_mirror = flags &
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
- op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+ op->map.vma_flags |= XE_VMA_READ_ONLY;
+ if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+ op->map.vma_flags |= DRM_GPUVA_SPARSE;
+ if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+ if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ op->map.vma_flags |= XE_VMA_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
.pat_index = op->map.pat_index,
};
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->map.is_null ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->map.dumpable ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= op->map.is_cpu_addr_mirror ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
vma = new_vma(vm, &op->base.map, &default_attr,
flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror) ||
+ !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= xe_vma_is_cpu_addr_mirror(old) ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+ flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
&old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
- op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ op == DRM_XE_VM_BIND_OP_UNMAP) ||
+ XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+ (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3626,8 +3606,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
syncs_user = u64_to_user_ptr(args->syncs);
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
+ struct xe_exec_queue *__q = q ?: vm->q[0];
+
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
&syncs_user[num_syncs],
+ __q->ufence_syncobj,
+ ++__q->ufence_timeline_value,
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0) |
(!args->num_binds ?
@@ -4212,7 +4196,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct xe_vma_ops vops;
struct drm_gpuva_ops *ops = NULL;
struct drm_gpuva_op *__op;
- bool is_cpu_addr_mirror = false;
+ unsigned int vma_flags = 0;
bool remap_op = false;
struct xe_vma_mem_attr tmp_attr;
u16 default_pat;
@@ -4242,15 +4226,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
vma = gpuva_to_vma(op->base.unmap.va);
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_REMAP) {
vma = gpuva_to_vma(op->base.remap.unmap->va);
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.is_cpu_addr_mirror = true;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
op->map.pat_index = default_pat;
}
} else {
@@ -4259,11 +4245,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_assert(vm->xe, !remap_op);
xe_assert(vm->xe, xe_vma_has_no_bo(vma));
remap_op = true;
-
- if (xe_vma_is_cpu_addr_mirror(vma))
- is_cpu_addr_mirror = true;
- else
- is_cpu_addr_mirror = false;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4254,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
/*
* In case of madvise ops DRM_GPUVA_OP_MAP is
* always after DRM_GPUVA_OP_REMAP, so ensure
- * we assign op->map.is_cpu_addr_mirror true
- * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+ * we propagate the flags from the vma we're
+ * unmapping.
*/
- op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
}
}
print_op(vm->xe, __op);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 413353e1c225..d6e2a0fdd4b3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
/**
* struct xe_vma_mem_attr - memory attributes associated with vma
@@ -345,17 +346,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
- bool read_only;
- /** @is_null: is NULL binding */
- bool is_null;
- /** @is_cpu_addr_mirror: is CPU address mirror binding */
- bool is_cpu_addr_mirror;
- /** @dumpable: whether BO is dumped on GPU hang */
- bool dumpable;
- /** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index cd03891654a1..3cf30718b200 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -679,6 +679,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
},
{ XE_RTP_NAME("14023061436"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute), OR,
+ GRAPHICS_VERSION_RANGE(3003, 3005),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
@@ -916,6 +918,15 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("14024681466"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
+ },
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c
new file mode 100644
index 000000000000..183d408d893a
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_INSPIROY_2_M 0x0067
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_INSPIROY_2_M),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21k_221221
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T21k_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * and the dial as keyboard combinations. In tablet mode it uses a vendor-specific
+ * HID report to report everything instead.
+ * Depending on the mode some HID reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet, use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project.
+ *
+ * This BPF program works for both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set then we disable the firmware
+ * pad and pen reports (by making them vendor collections that are ignored).
+ * If that property is not set we fix all hidraw nodes, so the tablet works in
+ * either mode; the drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Tablet_H641P
+ * # Report descriptor length: 18 bytes
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 0xFF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This rdesc does nothing until the tablet is switched to raw mode, see
+ * https://github.com/whot/huion-switcher
+ *
+ *
+ * Second hidraw node is the Pen. This one sends events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # Report descriptor length: 93 bytes
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18 <-- has no Invert eraser
+ * # 0x15, 0x00, // Logical Minimum (0) 20
+ * # 0x25, 0x01, // Logical Maximum (1) 22
+ * # 0x75, 0x01, // Report Size (1) 24
+ * # 0x95, 0x06, // Report Count (6) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x09, 0x32, // Usage (In Range) 30
+ * # 0x75, 0x01, // Report Size (1) 32
+ * # 0x95, 0x01, // Report Count (1) 34
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 40
+ * # 0x09, 0x30, // Usage (X) 42
+ * # 0x09, 0x31, // Usage (Y) 44
+ * # 0x55, 0x0d, // Unit Exponent (-3) 46 <-- change to -2
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 48 <-- change in³ to in
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 50
+ * # 0x35, 0x00, // Physical Minimum (0) 53
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 55 <-- invalid size
+ * # 0x75, 0x10, // Report Size (16) 58
+ * # 0x95, 0x02, // Report Count (2) 60
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 62
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 64
+ * # 0x09, 0x30, // Usage (Tip Pressure) 66
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 68
+ * # 0x75, 0x10, // Report Size (16) 71
+ * # 0x95, 0x01, // Report Count (1) 73
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * # 0x09, 0x3d, // Usage (X Tilt) 77 <-- No tilt reported
+ * # 0x09, 0x3e, // Usage (Y Tilt) 79
+ * # 0x15, 0x81, // Logical Minimum (-127) 81
+ * # 0x25, 0x7f, // Logical Maximum (127) 83
+ * # 0x75, 0x08, // Report Size (8) 85
+ * # 0x95, 0x02, // Report Count (2) 87
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 89
+ * # 0xc0, // End Collection 91
+ * # 0xc0, // End Collection 92
+ * R: 93 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 45 09 3c 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 09 3d 09 3e 15 81 25 7f 75 08 95 02 81 02 c0 c0
+ *
+ * Third hidraw node is the pad which sends a combination of keyboard shortcuts until
+ * the tablet is switched to raw mode, then it's mute:
+ *
+ * # Report descriptor length: 65 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x06, // Usage (Keyboard) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x03, // Report ID (3) 6
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 8
+ * # 0x19, 0xe0, // UsageMinimum (224) 10
+ * # 0x29, 0xe7, // UsageMaximum (231) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x08, // Report Count (8) 20
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 22
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 24
+ * # 0x19, 0x00, // UsageMinimum (0) 26
+ * # 0x29, 0xff, // UsageMaximum (255) 28
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 30
+ * # 0x75, 0x08, // Report Size (8) 33
+ * # 0x95, 0x06, // Report Count (6) 35
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 37
+ * # 0xc0, // End Collection 39
+ * # 0x05, 0x0c, // Usage Page (Consumer) 40
+ * # 0x09, 0x01, // Usage (Consumer Control) 42
+ * # 0xa1, 0x01, // Collection (Application) 44
+ * # 0x85, 0x04, // Report ID (4) 46
+ * # 0x19, 0x00, // UsageMinimum (0) 48
+ * # 0x2a, 0x3c, 0x02, // UsageMaximum (572) 50
+ * # 0x15, 0x00, // Logical Minimum (0) 53
+ * # 0x26, 0x3c, 0x02, // Logical Maximum (572) 55
+ * # 0x95, 0x01, // Report Count (1) 58
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 62
+ * # 0xc0, // End Collection 64
+ * R: 65 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 19 00 2a 3c 02 15 00 26 3c 02 95 01 75 10 81 00 c0
+ * N: HUION Huion Tablet_H641P
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 133
+#define PEN_REPORT_DESCRIPTOR_LENGTH 93
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 36
+#define PAD_REPORT_ID 3
+#define PEN_REPORT_ID 10
+#define VENDOR_REPORT_ID 8
+#define PAD_REPORT_LENGTH 8
+#define PEN_REPORT_LENGTH 10
+#define VENDOR_REPORT_LENGTH 12
+
+
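+/* Kept across events: in vendor (raw) mode the wheel report
+ * (data[1] == 0xf1) carries no button bits, so the last seen button
+ * bitfield is re-emitted together with wheel events.
+ */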
+__u16 last_button_state;
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 2/3 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report is the wheel
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ // Byte 5 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0x8)
+ LogicalMinimum_i8(0x1)
+ LogicalMaximum_i8(0x8)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Arr|Abs)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+ )
+};
+
+static const __u8 fixed_rdesc_pen[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PEN_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // -- Byte 1 in report
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
+ Usage_GD_X
+ Input(Var|Abs) // Bytes 2+3
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
+ Usage_GD_Y
+ Input(Var|Abs) // Bytes 4+5
+ )
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
+ Input(Var|Abs) // Bytes 6+7
+ // Two bytes padding so we don't need to change the report at all
+ ReportSize(8)
+ ReportCount(2)
+ Input(Const) // Bytes 8+9
+ )
+ )
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // Byte 0
+ // We leave the pen on the vendor report ID
+ ReportId(VENDOR_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // Byte 1 holds the buttons
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ // Note: reported logical range differs
+ // from the pen report ID for x and y
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
+ // Bytes 2/3 in report
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(20000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
+ // Bytes 4/5 in report
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ // Bytes 6/7 in report
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8192)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ ReportId(PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 holds the buttons
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4 and 5 are the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0xa)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ Usage_i8(0x31) // maps to BTN_SOUTH
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportCount(5)
+ Input(Const)
+ // Byte 6 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
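+/* Replacement descriptors used when the firmware pad/pen nodes are disabled:
+ * they turn those nodes into plain vendor collections that are ignored (see
+ * the comment at the top of this file) while keeping the report sizes intact.
+ */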
+static const __u8 disabled_rdesc_pen[] = {
+ FixedSizeVendorReport(PEN_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/pen nodes. They wouldn't send events
+ * anyway but would show up as duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+ if (rdesc_size == PEN_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pen, sizeof(disabled_rdesc_pen));
+ return sizeof(disabled_rdesc_pen);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pen, sizeof(fixed_rdesc_pen));
+ return sizeof(fixed_rdesc_pen);
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in default mode */
+ if (data[0] == PAD_REPORT_ID) {
+ /* Nicely enough, this device only supports one button down at a time, so
+ * the reports are easy to match. Buttons are numbered from the top:
+ * Button released: 03 00 00 00 00 00 00 00
+ * Button 1: 03 00 05 00 00 00 00 00 -> b
+ * Button 2: 03 07 11 00 00 00 00 00 -> Ctrl Shift N
+ * Button 3: 03 00 08 00 00 00 00 00 -> e
+ * Button 4: 03 00 0c 00 00 00 00 00 -> i
+ * Button 5: 03 00 2c 00 00 00 00 00 -> space
+ * Button 6: 03 01 08 00 00 00 00 00 -> Ctrl E
+ * Button 7: 03 01 16 00 00 00 00 00 -> Ctrl S
+ * Button 8: 03 05 1d 00 00 00 00 00 -> Ctrl Alt Z
+ *
+ * Wheel down: 03 01 2d 00 00 00 00 00 -> Ctrl -
+ * Wheel up: 03 01 2e 00 00 00 00 00 -> Ctrl =
+ */
+ __u8 button = 0;
+ __u8 wheel = 0;
+
+ switch (data[1] << 8 | data[2]) {
+ case 0x0000:
+ break;
+ case 0x0005:
+ button = 1;
+ break;
+ case 0x0711:
+ button = 2;
+ break;
+ case 0x0008:
+ button = 3;
+ break;
+ case 0x000c:
+ button = 4;
+ break;
+ case 0x002c:
+ button = 5;
+ break;
+ case 0x0108:
+ button = 6;
+ break;
+ case 0x0116:
+ button = 7;
+ break;
+ case 0x051d:
+ button = 8;
+ break;
+ case 0x012d:
+ wheel = -1;
+ break;
+ case 0x012e:
+ wheel = 1;
+ break;
+ }
+
+ __u8 report[6] = {PAD_REPORT_ID, 0x0, 0x0, 0x0, wheel, button};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return sizeof(report);
+ }
+
+ /* Nothing to do for the PEN_REPORT_ID, it's already mapped */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* Pad reports */
+ if (data[1] & 0x20) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus;
+ __u8 x;
+ __u8 y;
+ __u16 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+ __u8 wheel = 0;
+
+ /* Wheel report */
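+ /* data[5] appears to encode one detent per report: 1 for one
+ * direction and 2 for the other; 2 is remapped to 0xff (-1) so the
+ * value fits the Rel Wheel logical range of -1..1 declared in
+ * fixed_rdesc_vendor.
+ */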
+ if (data[1] == 0xf1) {
+ if (data[5] == 2)
+ wheel = 0xff;
+ else
+ wheel = data[5];
+ } else {
+ /* data[4] and data[5] are the buttons, mapped correctly */
+ last_button_state = data[4] | (data[5] << 8);
+ wheel = 0; // wheel
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->wheel = wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ /* Pen reports need nothing done */
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(inspiroy_2) = {
+ .hid_device_event = (void *)inspiroy_2_fix_events,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
index 13f64fb49800..79453362bf97 100644
--- a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
@@ -163,6 +163,9 @@ char EXPECTED_FIRMWARE_ID[] = "HUION_T21j_";
__u8 last_button_state;
+__u8 last_tip_state;
+__u8 last_sec_barrel_state;
+__u8 force_tip_down_count;
static const __u8 fixed_rdesc_pad[] = {
UsagePage_GenericDesktop
@@ -522,9 +525,31 @@ int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)
pad_report->wheel = wheel;
return sizeof(struct pad_report);
- }
+ } else if (data[1] & 0x80) { /* Pen reports with InRange 1 */
+ __u8 tip_state = data[1] & 0x1;
+ __u8 sec_barrel_state = data[1] & 0x4;
+
+ if (force_tip_down_count > 0) {
+ data[1] |= 0x1;
+ --force_tip_down_count;
+ if (tip_state)
+ force_tip_down_count = 0;
+ }
- /* Pen reports need nothing done */
+ /* Tip was down and we just pressed or released the
+ * secondary barrel switch (the physical eraser
+ * button). The device then sends up to 4 spurious
+ * reports with Tip Switch 0 (sometimes including
+ * this very report), so force the tip to stay down
+ * until a genuine tip-down report arrives.
+ */
+ if (last_tip_state &&
+ last_sec_barrel_state != sec_barrel_state) {
+ force_tip_down_count = 4;
+ data[1] |= 0x1;
+ }
+ last_tip_state = tip_state;
+ last_sec_barrel_state = sec_barrel_state;
+ }
}
return 0;
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
index 489cb4fcc2cd..5f43e4071848 100644
--- a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -9,12 +9,15 @@
#define VID_HUION 0x256C
#define PID_KAMVAS_PRO_19 0x006B
+#define PID_KAMVAS_PRO_27 0x006c
#define NAME_KAMVAS_PRO_19 "HUION Huion Tablet_GT1902"
+#define NAME_KAMVAS_PRO_27 "HUION Huion Tablet_GT2701"
#define TEST_PREFIX "uhid test "
HID_BPF_CONFIG(
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, VID_HUION, PID_KAMVAS_PRO_19),
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, VID_HUION, PID_KAMVAS_PRO_27),
);
bool prev_was_out_of_range;
@@ -351,7 +354,8 @@ int probe(struct hid_bpf_probe_args *ctx)
if (!__builtin_memcmp(name, TEST_PREFIX, sizeof(TEST_PREFIX) - 1))
name += sizeof(TEST_PREFIX) - 1;
- if (__builtin_memcmp(name, NAME_KAMVAS_PRO_19, sizeof(NAME_KAMVAS_PRO_19)))
+ if (__builtin_memcmp(name, NAME_KAMVAS_PRO_19, sizeof(NAME_KAMVAS_PRO_19)) &&
+ __builtin_memcmp(name, NAME_KAMVAS_PRO_27, sizeof(NAME_KAMVAS_PRO_27)))
ctx->retval = -EINVAL;
hid_bpf_release_context(hctx);
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c
new file mode 100644
index 000000000000..b63f9a48ea45
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Nicholas LaPointe
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256c
+#define PID_KAMVAS13_GEN3 0x2008
+
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define TABLET_DESCRIPTOR_LENGTH 368
+#define WHEEL_DESCRIPTOR_LENGTH 108
+
+#define VENDOR_REPORT_ID 8
+#define VENDOR_REPORT_LENGTH 14
+
+#define VENDOR_REPORT_SUBTYPE_PEN 0x08
+#define VENDOR_REPORT_SUBTYPE_PEN_OUT 0x00
+#define VENDOR_REPORT_SUBTYPE_BUTTONS 0x0e
+#define VENDOR_REPORT_SUBTYPE_WHEELS 0x0f
+
+/* For the reports that we create ourselves */
+#define CUSTOM_PAD_REPORT_ID 9
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_HUION, PID_KAMVAS13_GEN3),
+);
+
+
+/*
+ * This tablet can send reports using one of two different data formats,
+ * depending on what "mode" the tablet is in.
+ *
+ * By default, the tablet will send reports that can be decoded using its
+ * included HID descriptors (descriptors 1 and 2, shown below).
+ * This mode will be called "firmware mode" throughout this file.
+ *
+ * The HID descriptor that describes pen events in firmware mode (descriptor 1)
+ * has multiple bugs:
+ * * "Secondary Tip Switch" instead of "Secondary Barrel Switch"
+ * * "Invert" instead of (or potentially shared with) third barrel button
+ * * Specified tablet area of 2048 in³ instead of 293.8 x 165.2mm
+ * * Specified tilt range of -90 to +90 instead of -60 to +60
+ *
+ * While these can be easily patched up by editing the descriptor, a larger
+ * problem with the firmware mode exists: it is impossible to tell which of the
+ * two wheels are being rotated (or having their central button pressed).
+ *
+ *
+ * By using a tool such as huion-switcher (https://github.com/whot/huion-switcher),
+ * the tablet can be made to send reports using a proprietary format that is not
+ * adequately described by its relevant descriptor (descriptor 0, shown below).
+ * This mode will be called "vendor mode" throughout this file.
+ *
+ * The reports sent while in vendor mode allow for proper decoding of the wheels.
+ *
+ * For simplicity and maximum functionality, this BPF focuses strictly on
+ * enabling one to make use of the vendor mode.
+ */
+
+/*
+ * DESCRIPTORS
+ * DESCRIPTOR 0
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # ┅ 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x68, // Report Size (104) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 18
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 21
+ * # 0xa1, 0x01, // Collection (Application) 23
+ * # ┅ 0x85, 0x16, // Report ID (22) 25
+ * # 0x75, 0x08, // Report Size (8) 27
+ * # 0x95, 0x07, // Report Count (7) 29
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 31
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 33
+ * # 0xc0, // End Collection 35
+ * R: 36 06 00 ff 09 01 a1 01 85 08 75 68 95 01 09 01 81 02 c0 06 00 ff 09 01 a1 01 85 16 75 08 95 07 09 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ * DESCRIPTOR 1
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # ┅ 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x43, // Usage (Secondary Tip Switch) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x45, // Usage (Eraser) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x06, // Report Count (6) 28
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x09, 0x32, // Usage (In Range) 32
+ * # 0x75, 0x01, // Report Size (1) 34
+ * # 0x95, 0x01, // Report Count (1) 36
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x30, // Usage (X) 44
+ * # 0x09, 0x31, // Usage (Y) 46
+ * # 0x55, 0x0d, // Unit Exponent (-3) 48
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 50
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 52
+ * # 0x35, 0x00, // Physical Minimum (0) 55
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x02, // Report Count (2) 62
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 64
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 66
+ * # 0x09, 0x30, // Usage (Tip Pressure) 68
+ * # 0x26, 0xff, 0x3f, // Logical Maximum (16383) 70
+ * # 0x75, 0x10, // Report Size (16) 73
+ * # 0x95, 0x01, // Report Count (1) 75
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 77
+ * # 0x09, 0x3d, // Usage (X Tilt) 79
+ * # 0x09, 0x3e, // Usage (Y Tilt) 81
+ * # 0x15, 0xa6, // Logical Minimum (-90) 83
+ * # 0x25, 0x5a, // Logical Maximum (90) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x02, // Report Count (2) 89
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 91
+ * # 0xc0, // End Collection 93
+ * # 0xc0, // End Collection 94
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 95
+ * # 0x09, 0x04, // Usage (Touch Screen) 97
+ * # 0xa1, 0x01, // Collection (Application) 99
+ * # ┅ 0x85, 0x04, // Report ID (4) 101
+ * # 0x09, 0x22, // Usage (Finger) 103
+ * # 0xa1, 0x02, // Collection (Logical) 105
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 107
+ * # 0x95, 0x01, // Report Count (1) 109
+ * # 0x75, 0x06, // Report Size (6) 111
+ * # 0x09, 0x51, // Usage (Contact Identifier) 113
+ * # 0x15, 0x00, // Logical Minimum (0) 115
+ * # 0x25, 0x3f, // Logical Maximum (63) 117
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 119
+ * # 0x09, 0x42, // Usage (Tip Switch) 121
+ * # 0x25, 0x01, // Logical Maximum (1) 123
+ * # 0x75, 0x01, // Report Size (1) 125
+ * # 0x95, 0x01, // Report Count (1) 127
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 129
+ * # 0x75, 0x01, // Report Size (1) 131
+ * # 0x95, 0x01, // Report Count (1) 133
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 135
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 137
+ * # 0x75, 0x10, // Report Size (16) 139
+ * # 0x55, 0x0e, // Unit Exponent (-2) 141
+ * # 0x65, 0x11, // Unit (SILinear: cm) 143
+ * # 0x09, 0x30, // Usage (X) 145
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 147
+ * # 0x35, 0x00, // Physical Minimum (0) 150
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 152
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 155
+ * # 0x09, 0x31, // Usage (Y) 157
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 159
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 162
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 165
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 167
+ * # 0x09, 0x30, // Usage (Tip Pressure) 169
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 171
+ * # 0x75, 0x10, // Report Size (16) 174
+ * # 0x95, 0x01, // Report Count (1) 176
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 178
+ * # 0xc0, // End Collection 180
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 181
+ * # 0x09, 0x22, // Usage (Finger) 183
+ * # 0xa1, 0x02, // Collection (Logical) 185
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 187
+ * # 0x95, 0x01, // Report Count (1) 189
+ * # 0x75, 0x06, // Report Size (6) 191
+ * # 0x09, 0x51, // Usage (Contact Identifier) 193
+ * # 0x15, 0x00, // Logical Minimum (0) 195
+ * # 0x25, 0x3f, // Logical Maximum (63) 197
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 199
+ * # 0x09, 0x42, // Usage (Tip Switch) 201
+ * # 0x25, 0x01, // Logical Maximum (1) 203
+ * # 0x75, 0x01, // Report Size (1) 205
+ * # 0x95, 0x01, // Report Count (1) 207
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 209
+ * # 0x75, 0x01, // Report Size (1) 211
+ * # 0x95, 0x01, // Report Count (1) 213
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 215
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 217
+ * # 0x75, 0x10, // Report Size (16) 219
+ * # 0x55, 0x0e, // Unit Exponent (-2) 221
+ * # 0x65, 0x11, // Unit (SILinear: cm) 223
+ * # 0x09, 0x30, // Usage (X) 225
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 227
+ * # 0x35, 0x00, // Physical Minimum (0) 230
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 232
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 235
+ * # 0x09, 0x31, // Usage (Y) 237
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 239
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 242
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 245
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 247
+ * # 0x09, 0x30, // Usage (Tip Pressure) 249
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 251
+ * # 0x75, 0x10, // Report Size (16) 254
+ * # 0x95, 0x01, // Report Count (1) 256
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 258
+ * # 0xc0, // End Collection 260
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 261
+ * # 0x09, 0x56, // Usage (Scan Time) 263
+ * # 0x55, 0x00, // Unit Exponent (0) 265
+ * # 0x65, 0x00, // Unit (None) 267
+ * # 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647) 269
+ * # 0x95, 0x01, // Report Count (1) 274
+ * # 0x75, 0x20, // Report Size (32) 276
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 278
+ * # 0x09, 0x54, // Usage (Contact Count) 280
+ * # 0x25, 0x7f, // Logical Maximum (127) 282
+ * # 0x95, 0x01, // Report Count (1) 284
+ * # 0x75, 0x08, // Report Size (8) 286
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 288
+ * # 0x75, 0x08, // Report Size (8) 290
+ * # 0x95, 0x08, // Report Count (8) 292
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 294
+ * # ┅ 0x85, 0x05, // Report ID (5) 296
+ * # 0x09, 0x55, // Usage (Contact Count Maximum) 298
+ * # 0x25, 0x0a, // Logical Maximum (10) 300
+ * # 0x75, 0x08, // Report Size (8) 302
+ * # 0x95, 0x01, // Report Count (1) 304
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 306
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 308
+ * # 0x09, 0xc5, // Usage (Vendor Usage 0xc5) 311
+ * # ┅ 0x85, 0x06, // Report ID (6) 313
+ * # 0x15, 0x00, // Logical Minimum (0) 315
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 317
+ * # 0x75, 0x08, // Report Size (8) 320
+ * # 0x96, 0x00, 0x01, // Report Count (256) 322
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 325
+ * # 0xc0, // End Collection 327
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 328
+ * # 0x09, 0x06, // Usage (Keyboard) 330
+ * # 0xa1, 0x01, // Collection (Application) 332
+ * # ┅ 0x85, 0x03, // Report ID (3) 334
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 336
+ * # 0x19, 0xe0, // UsageMinimum (224) 338
+ * # 0x29, 0xe7, // UsageMaximum (231) 340
+ * # 0x15, 0x00, // Logical Minimum (0) 342
+ * # 0x25, 0x01, // Logical Maximum (1) 344
+ * # 0x75, 0x01, // Report Size (1) 346
+ * # 0x95, 0x08, // Report Count (8) 348
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 350
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 352
+ * # 0x19, 0x00, // UsageMinimum (0) 354
+ * # 0x29, 0xff, // UsageMaximum (255) 356
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 358
+ * # 0x75, 0x08, // Report Size (8) 361
+ * # 0x95, 0x06, // Report Count (6) 363
+ * # ┇ 0x81, 0x00, // Input (Data,Arr,Abs) 365
+ * # 0xc0, // End Collection 367
+ * R: 368 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 43 09 3c 09 45 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 3f 75 10 95 01 81 02 09 3d 09 3e 15 a6 25 5a 75 08 95 02 81 02 c0 c0 05 0d 09 04 a1 01 85 04 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 56 55 00 65 00 27 ff ff ff 7f 95 01 75 20 81 02 09 54 25 7f 95 01 75 08 81 02 75 08 95 08 81 03 85 05 09 55 25 0a 75 08 95 01 b1 02 06 00 ff 09 c5 85 06 15 00 26 ff 00 75 08 96 00 01 b1 02 c0 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ * DESCRIPTOR 2
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # ┅ 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Button 1) 26
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # ┇ 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * R: 108 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * VENDOR MODE
+ * HUION_FIRMWARE_ID="HUION_M22c_240606"
+ * HUION_MAGIC_BYTES="140388e500108100ff3fd8130307008008004010"
+ *
+ * MAGIC BYTES
+ * [LogicalMaximum, X] [LogicalMaximum, Y] [LogicalMaximum, Pressure] [ LPI]
+ * 14 03 [ 88 e5] 00 [ 10 81] 00 [ ff 3f] [d8 13] 03 07 00 80 08 00 40 10
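+ *
+ * Decoded from the magic bytes above (little-endian; these match the
+ * maxima seen in the example pen reports below, and the LPI reading is
+ * an assumption based on the byte labels):
+ * X logical max: 0xe588 (58760)
+ * Y logical max: 0x8110 (33040)
+ * Pressure logical max: 0x3fff (16383)
+ * Resolution: 0x13d8 (5080 LPI)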
+ *
+ *
+ * HIDRAW 0
+ * DESCRIPTIONS
+ * report_subtype = (data[1] >> 4) & 0x0f
+ *
+ * REPORT SUBTYPES
+ * 0x0e Buttons
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6 (top wheel button)
+ * (data[4] & 0x40) button 7 (bottom wheel button)
+ *
+ * All tablet buttons release with the same report:
+ * 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ * Despite data[4] looking like a bit field, only one button
+ * can be unambiguously tracked at a time.
+ * (See NOTES ON SIMULTANEOUS BUTTON HOLDS at the end of this
+ * comment for examples of the confusion this can create.)
+ *
+ * All buttons, with the exceptions of 6 and 7, will repeatedly
+ * report a press event approximately every 225ms while held.
+ *
+ * 0x0f Wheels
+ * data[3] == 1: top wheel
+ * data[3] == 2: bottom wheel
+ * data[5] == 1: clockwise
+ * data[5] == 2: counter-clockwise
+ *
+ * 0x08/0x00 Pen
+ * report_subtype == 0x08: in-range
+ * report_subtype == 0x00: out-of-range
+ * For clarity, this is also equivalent to:
+ * (data[1] & 0x80) in-range
+ *
+ * Switches
+ * (data[1] & 0x01) tip switch
+ * (data[1] & 0x02) barrel switch
+ * (data[1] & 0x04) secondary barrel switch
+ * (data[1] & 0x08) third barrel switch
+ *
+ * Unfortunately, I don't have a pen with an eraser, so I can't
+ * confirm where the invert and eraser bits reside.
+ * If we guess using the definitions from HID descriptor 1,
+ * then they might be...
+ * (data[1] & 0x08) invert (conflicts with third barrel switch)
+ * (data[1] & 0x10) eraser
+ *
+ * data[2], data[3] X (little-endian, maximum 0xe588)
+ *
+ * data[4], data[5] Y (little-endian, maximum 0x8110)
+ *
+ * data[6], data[7] Pressure (little-endian, maximum 0x3fff)
+ *
+ * data[10] X tilt (signed, -60 to +60)
+ * data[11] Y tilt (signed, -60 to +60, inverted)
+ *
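+ * A minimal decode sketch following the layout above (variable names
+ * are illustrative only, not necessarily what the code below uses):
+ *
+ * __u8 subtype = (data[1] >> 4) & 0x0f;
+ *
+ * if (subtype == VENDOR_REPORT_SUBTYPE_WHEELS) {
+ * bool top_wheel = (data[3] == 1);
+ * int delta = (data[5] == 1) ? 1 : -1;
+ * } else if (subtype == VENDOR_REPORT_SUBTYPE_BUTTONS) {
+ * __u8 buttons = data[4];
+ * } else { // pen: 0x08 in range, 0x00 out of range
+ * __u16 x = data[2] | (data[3] << 8);
+ * __u16 y = data[4] | (data[5] << 8);
+ * __u16 pressure = data[6] | (data[7] << 8);
+ * __s8 tilt_x = (__s8)data[10];
+ * __s8 tilt_y = (__s8)data[11]; // inverted, see above
+ * }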
+ *
+ * EXAMPLE REPORTS
+ * Top wheel button, press, hold, then release
+ * E: 000000.000040 14 08 e0 01 01 20 00 00 00 00 00 00 00 00 00
+ * E: 000001.531559 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel button, press, hold, then release
+ * E: 000002.787603 14 08 e0 01 01 40 00 00 00 00 00 00 00 00 00
+ * E: 000004.215609 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ *
+ * Top wheel rotation, one detent CW
+ * E: 000194.003899 14 08 f1 01 01 00 01 00 00 00 00 00 00 00 00
+ *
+ * Top wheel rotation, one detent CCW
+ * E: 000194.997812 14 08 f1 01 01 00 02 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel rotation, one detent CW
+ * E: 000196.693840 14 08 f1 01 02 00 01 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel rotation, one detent CCW
+ * E: 000197.757895 14 08 f1 01 02 00 02 00 00 00 00 00 00 00 00
+ *
+ *
+ * Button 1, press, hold, then release
+ * E: 000000.000149 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00 < press
+ * E: 000000.447598 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000000.673586 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000000.900582 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.126703 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.347706 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.533721 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 2, press, hold, then release
+ * E: 000003.304735 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00 < press
+ * E: 000003.746743 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000003.973741 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.199832 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.426732 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.647738 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.874733 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.930713 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 3, press, hold, then release
+ * E: 000006.650346 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00 < press
+ * E: 000007.051782 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000007.273738 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.499794 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.726725 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.947765 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000008.174755 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000008.328786 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 4, press, hold, then release
+ * E: 000009.893820 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00 < press
+ * E: 000010.274781 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000010.500931 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000010.722777 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000010.948778 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.175799 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.401153 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.432114 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 5, press, hold, then release
+ * E: 000013.007778 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00 < press
+ * E: 000013.424741 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000013.651715 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000013.872763 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.099789 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.325734 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.438080 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ *
+ * Pen: Top-left, then out of range
+ * E: 000368.572184 14 08 80 00 00 00 00 00 00 00 00 fb ed 03 00
+ * E: 000368.573030 14 08 00 00 00 00 00 00 00 00 00 fb ed 03 00
+ *
+ * Pen: Bottom-right, then out of range
+ * E: 000544.433185 14 08 80 88 e5 10 81 00 00 00 00 00 00 03 00
+ * E: 000544.434183 14 08 00 88 e5 10 81 00 00 00 00 00 00 03 00
+ *
+ * Pen: Max Y tilt (tip of pen points down)
+ * E: 000002.231927 14 08 80 f5 5d 6c 36 00 00 00 00 09 3c 03 00
+ *
+ * Pen: Min Y Tilt (tip of pen points up)
+ * E: 000657.593338 14 08 80 5f 69 fa 2c 00 00 00 00 fe c4 03 00
+ *
+ * Pen: Max X tilt (tip of pen points left)
+ * E: 000742.246503 14 08 80 2a 4f c4 38 00 00 00 00 3c ed 03 00
+ *
+ * Pen: Min X Tilt (tip of pen points right)
+ * E: 000776.404446 14 08 00 18 85 7c 3b 00 00 00 00 c4 ed 03 00
+ *
+ * Pen: Tip switch, max pressure, then low pressure
+ * E: 001138.935675 14 08 81 d2 66 04 40 ff 3f 00 00 00 08 03 00
+ *
+ * E: 001142.403715 14 08 81 9d 69 47 3e 82 04 00 00 00 07 03 00
+ *
+ * Pen: Barrel switch
+ * E: 001210.645652 14 08 82 0d 72 ea 2b 00 00 00 00 db c4 03 00
+ *
+ * Pen: Secondary barrel switch
+ * E: 001211.519729 14 08 84 2c 71 51 2b 00 00 00 00 da c4 03 00
+ *
+ * Pen: Third switch
+ * E: 001212.443722 14 08 88 1d 72 df 2b 00 00 00 00 dc c4 03 00
+ *
+ *
+ * HIDRAW 1
+ * No reports
+ *
+ *
+ * HIDRAW 2
+ * No reports
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * FIRMWARE MODE
+ * HIDRAW 0
+ * No reports
+ *
+ *
+ * HIDRAW 1
+ * EXAMPLE REPORTS
+ * Top wheel button, *release*
+ * E: 000067.043739 8 03 00 00 00 00 00 00 00
+ *
+ * Bottom wheel button, *release*
+ * E: 000068.219161 8 03 00 00 00 00 00 00 00
+ *
+ *
+ * Button 1, press, then release
+ * E: 000163.767870 8 03 00 05 00 00 00 00 00
+ * E: 000165.969193 8 03 00 00 00 00 00 00 00
+ *
+ * Button 2, press, then release
+ * E: 000261.728935 8 03 05 11 00 00 00 00 00
+ * E: 000262.956220 8 03 00 00 00 00 00 00 00
+ *
+ * Button 3, press, then release
+ * E: 000289.127881 8 03 01 16 00 00 00 00 00
+ * E: 000290.014594 8 03 00 00 00 00 00 00 00
+ *
+ * Button 4, press, then release
+ * E: 000303.025839 8 03 00 2c 00 00 00 00 00
+ * E: 000303.994479 8 03 00 00 00 00 00 00 00
+ *
+ * Button 5, press, then release
+ * E: 000315.500835 8 03 05 1d 00 00 00 00 00
+ * E: 000316.603274 8 03 00 00 00 00 00 00 00
+ *
+ * BUTTON SUMMARY
+ * 1 E: 000163.767870 8 03 00 05 00 00 00 00 00 Keyboard: B
+ * 2 E: 000261.728935 8 03 05 11 00 00 00 00 00 Keyboard: LCtrl+LAlt N
+ * 3 E: 000289.127881 8 03 01 16 00 00 00 00 00 Keyboard: LCtrl S
+ * 4 E: 000303.025839 8 03 00 2c 00 00 00 00 00 Keyboard: Space
+ * 5 E: 000315.500835 8 03 05 1d 00 00 00 00 00 Keyboard: LCtrl+LAlt Z
+ *
+ * All buttons (including the wheel buttons) release the same way:
+ * 03 00 00 00 00 00 00 00
+ *
+ *
+ * Pen: Top-left, then out of range
+ * E: 000063.196828 10 0a c0 00 00 00 00 00 00 00 02
+ * E: 000063.197762 10 0a 00 00 00 00 00 00 00 00 02
+ *
+ * Pen: Bottom-right, then out of range
+ * E: 000197.123138 10 0a c0 ff 7f ff 7f 00 00 00 00
+ * E: 000197.124915 10 0a 00 ff 7f ff 7f 00 00 00 00
+ *
+ * Pen: Max Y Tilt (tip of pen points up)
+ * E: 000291.399541 10 0a c0 19 32 0b 58 00 00 00 3c
+ *
+ * Pen: Min Y tilt (tip of pen points down)
+ * E: 000340.888288 10 0a c0 85 40 89 6e 00 00 17 c4
+ *
+ * Pen: Max X tilt (tip of pen points left)
+ * E: 000165.575115 10 0a c0 a7 34 99 42 00 00 3c f4
+ *
+ * Pen: Min X Tilt (tip of pen points right)
+ * E: 000129.507883 10 0a c0 ea 4b 08 40 00 00 c4 1a
+ *
+ * Pen: Tip switch, max pressure, then low pressure
+ * E: 000242.077160 10 0a c1 7e 3c 12 31 ff 3f 03 fd
+ *
+ * E: 000339.139188 10 0a c1 ee 3a 9e 32 b5 00 06 f6
+ *
+ * Pen: Barrel switch
+ * E: 000037.949777 10 0a c2 5c 28 47 2a 00 00 f6 3c
+ *
+ * Pen: Secondary barrel switch
+ * E: 000038.320840 10 0a c4 e4 27 fd 29 00 00 f3 38
+ *
+ * Pen: Third switch
+ * E: 000038.923822 10 0a c8 97 27 5f 29 00 00 f2 33
+ *
+ *
+ * HIDRAW 2
+ * EXAMPLE REPORTS
+ * Either wheel rotation, one detent CW
+ * E: 000097.276573 9 11 00 01 00 00 00 00 00 00
+ *
+ * Either wheel rotation, one detent CCW
+ * E: 000153.416538 9 11 00 ff ff 00 00 00 00 00
+ *
+ * Either wheel rotation, increasing rotation speed CW
+ * (Note that the wheels on my particular tablet may be
+ * damaged, so the false rotation direction changes
+ * that can be observed might not happen on other units.)
+ * E: 000210.514925 9 11 00 01 00 00 00 00 00 00
+ * E: 000210.725718 9 11 00 01 00 00 00 00 00 00
+ * E: 000210.924009 9 11 00 01 00 00 00 00 00 00
+ * E: 000211.205629 9 11 00 01 00 00 00 00 00 00
+ * E: 000211.280521 9 11 00 0b 00 00 00 00 00 00
+ * E: 000211.340121 9 11 00 0e 00 00 00 00 00 00
+ * E: 000211.404018 9 11 00 0d 00 00 00 00 00 00
+ * E: 000211.462060 9 11 00 0e 00 00 00 00 00 00
+ * E: 000211.544886 9 11 00 0a 00 00 00 00 00 00
+ * E: 000211.606130 9 11 00 0d 00 00 00 00 00 00
+ * E: 000211.674560 9 11 00 0c 00 00 00 00 00 00
+ * E: 000211.712039 9 11 00 16 00 00 00 00 00 00
+ * E: 000211.748076 9 11 00 17 00 00 00 00 00 00
+ * E: 000211.786016 9 11 00 17 00 00 00 00 00 00
+ * E: 000211.832960 9 11 00 11 00 00 00 00 00 00
+ * E: 000211.874081 9 11 00 14 00 00 00 00 00 00
+ * E: 000211.925094 9 11 00 10 00 00 00 00 00 00
+ * E: 000211.959048 9 11 00 18 00 00 00 00 00 00
+ * E: 000212.006937 9 11 00 11 00 00 00 00 00 00
+ * E: 000212.050055 9 11 00 13 00 00 00 00 00 00
+ * E: 000212.091947 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.122989 9 11 00 1a 00 00 00 00 00 00
+ * E: 000212.160866 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.194002 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.242249 9 11 00 11 00 00 00 00 00 00
+ * E: 000212.278061 9 11 00 18 00 00 00 00 00 00
+ * E: 000212.328899 9 11 00 10 00 00 00 00 00 00
+ * E: 000212.354005 9 11 00 22 00 00 00 00 00 00
+ * E: 000212.398995 9 11 00 12 00 00 00 00 00 00
+ * E: 000212.432050 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.471164 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.507047 9 11 00 17 00 00 00 00 00 00
+ * E: 000212.540964 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.567942 9 11 00 1f 00 00 00 00 00 00
+ * E: 000212.610007 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.641101 9 11 00 1b 00 00 00 00 00 00
+ * E: 000212.674113 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.674909 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.677062 9 11 00 00 02 00 00 00 00 00
+ * E: 000212.679048 9 11 00 55 01 00 00 00 00 00
+ * E: 000212.682166 9 11 00 55 01 00 00 00 00 00
+ * E: 000212.682788 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.683899 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.685827 9 11 00 67 fe 00 00 00 00 00
+ * E: 000212.686941 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.727840 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.772884 9 11 00 13 00 00 00 00 00 00
+ * E: 000212.810975 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.811793 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.812683 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.813905 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.814909 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.816942 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.817851 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.818752 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.819910 9 11 00 56 fd 00 00 00 00 00
+ * E: 000212.820781 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.821811 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.822920 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.823861 9 11 00 00 02 00 00 00 00 00
+ * E: 000212.828781 9 11 00 ba 00 00 00 00 00 00
+ * E: 000212.874097 9 11 00 12 00 00 00 00 00 00
+ * E: 000212.874872 9 11 00 00 fc 00 00 00 00 00
+ * E: 000212.876136 9 11 00 00 fc 00 00 00 00 00
+ * E: 000212.877036 9 11 00 00 f8 00 00 00 00 00
+ * E: 000212.877993 9 11 00 00 f8 00 00 00 00 00
+ * E: 000212.879748 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.880728 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.881956 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.885065 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.917060 9 11 00 1a 00 00 00 00 00 00
+ * E: 000212.936458 9 11 00 2d 00 00 00 00 00 00
+ * E: 000212.957860 9 11 00 25 00 00 00 00 00 00
+ * E: 000212.984019 9 11 00 20 00 00 00 00 00 00
+ * E: 000213.017915 9 11 00 19 00 00 00 00 00 00
+ * E: 000213.039973 9 11 00 27 00 00 00 00 00 00
+ * E: 000213.065933 9 11 00 21 00 00 00 00 00 00
+ * E: 000213.085807 9 11 00 28 00 00 00 00 00 00
+ * E: 000213.108888 9 11 00 25 00 00 00 00 00 00
+ * E: 000213.129726 9 11 00 29 00 00 00 00 00 00
+ * E: 000213.172043 9 11 00 14 00 00 00 00 00 00
+ * E: 000213.195873 9 11 00 23 00 00 00 00 00 00
+ * E: 000213.222884 9 11 00 20 00 00 00 00 00 00
+ * E: 000213.243220 9 11 00 2a 00 00 00 00 00 00
+ * E: 000213.266778 9 11 00 24 00 00 00 00 00 00
+ * E: 000213.285951 9 11 00 2b 00 00 00 00 00 00
+ * E: 000213.306045 9 11 00 2a 00 00 00 00 00 00
+ * E: 000213.306796 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.307755 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.308820 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.309971 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.310980 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.311853 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.312861 9 11 00 aa 02 00 00 00 00 00
+ * E: 000213.313884 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.315111 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.315992 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.316955 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.346065 9 11 00 1d 00 00 00 00 00 00
+ * E: 000213.346963 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.347874 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.348736 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.349795 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.350791 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.351791 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.352729 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.353811 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.354755 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.355795 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.356813 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.357817 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.393838 9 11 00 17 00 00 00 00 00 00
+ * E: 000213.394719 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.395682 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.396679 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.397651 9 11 00 00 fc 00 00 00 00 00
+ * E: 000213.398661 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.400308 9 11 00 56 fd 00 00 00 00 00
+ * E: 000213.400909 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.401837 9 11 00 01 00 00 00 00 00 00
+ *
+ * Either wheel rotation, increasing rotation speed CCW
+ * (Note that the wheels on my particular tablet may be
+ * damaged, so the false rotation direction changes
+ * that can be observed might not happen on other units.)
+ * E: 000040.527820 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.816644 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.880423 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000040.882570 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.883381 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.885463 9 11 00 aa 02 00 00 00 00 00
+ * E: 000040.924106 9 11 00 ea ff 00 00 00 00 00
+ * E: 000041.006155 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.085799 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.168492 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.233453 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000041.296641 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000041.370302 9 11 00 f5 ff 00 00 00 00 00
+ * E: 000041.437410 9 11 00 f4 ff 00 00 00 00 00
+ * E: 000041.474514 9 11 00 e9 ff 00 00 00 00 00
+ * E: 000041.522171 9 11 00 ef ff 00 00 00 00 00
+ * E: 000041.568160 9 11 00 ee ff 00 00 00 00 00
+ * E: 000041.608146 9 11 00 ec ff 00 00 00 00 00
+ * E: 000041.627132 9 11 00 d3 ff 00 00 00 00 00
+ * E: 000041.656151 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.682264 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000041.714186 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000041.740339 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000041.772087 9 11 00 e5 ff 00 00 00 00 00
+ * E: 000041.801093 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.834051 9 11 00 e7 ff 00 00 00 00 00
+ * E: 000041.863094 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.901016 9 11 00 ea ff 00 00 00 00 00
+ * E: 000041.901956 9 11 00 00 04 00 00 00 00 00
+ * E: 000041.902837 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.903927 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.905066 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.907214 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.909011 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.909953 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.910917 9 11 00 00 08 00 00 00 00 00
+ * E: 000041.913280 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.914121 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.915346 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.962101 9 11 00 ee ff 00 00 00 00 00
+ * E: 000041.964062 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.964978 9 11 00 00 fc 00 00 00 00 00
+ * E: 000041.968058 9 11 00 24 01 00 00 00 00 00
+ * E: 000041.968880 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.970977 9 11 00 aa 02 00 00 00 00 00
+ * E: 000041.971932 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.972943 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.975291 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.978274 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.035079 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.041283 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.042057 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.045169 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.051242 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.056099 9 11 00 63 ff 00 00 00 00 00
+ * E: 000042.106329 9 11 00 ef ff 00 00 00 00 00
+ * E: 000042.108601 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.116259 9 11 00 6b 00 00 00 00 00 00
+ * E: 000042.119140 9 11 00 55 01 00 00 00 00 00
+ * E: 000042.126101 9 11 00 88 ff 00 00 00 00 00
+ * E: 000042.158009 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000042.172108 9 11 00 be ff 00 00 00 00 00
+ * E: 000042.207417 9 11 00 e8 ff 00 00 00 00 00
+ * E: 000042.223155 9 11 00 cc ff 00 00 00 00 00
+ * E: 000042.255185 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000042.276280 9 11 00 d7 ff 00 00 00 00 00
+ * E: 000042.302128 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.317423 9 11 00 c8 ff 00 00 00 00 00
+ * E: 000042.345226 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.357243 9 11 00 bc ff 00 00 00 00 00
+ * E: 000042.381308 9 11 00 dc ff 00 00 00 00 00
+ * E: 000042.383180 9 11 00 dc fe 00 00 00 00 00
+ * E: 000042.412288 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000042.451216 9 11 00 eb ff 00 00 00 00 00
+ * E: 000042.478372 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.502116 9 11 00 dd ff 00 00 00 00 00
+ * E: 000042.520105 9 11 00 d3 ff 00 00 00 00 00
+ * E: 000042.540345 9 11 00 d6 ff 00 00 00 00 00
+ * E: 000042.541021 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.542009 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.543045 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.544279 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.545097 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.546074 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.547237 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.548029 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.549304 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.553123 9 11 00 00 ff 00 00 00 00 00
+ * E: 000042.581186 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.582238 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.583150 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.584273 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.585019 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.586059 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.589012 9 11 00 67 fe 00 00 00 00 00
+ * E: 000042.590066 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.592916 9 11 00 dc fe 00 00 00 00 00
+ * E: 000042.621124 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.622092 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.623069 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.624030 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.625006 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.626068 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.626876 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.628392 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.628918 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.630009 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.631934 9 11 00 00 fe 00 00 00 00 00
+ * E: 000042.656285 9 11 00 dd ff 00 00 00 00 00
+ * E: 000042.659870 9 11 00 cc 00 00 00 00 00 00
+ * E: 000042.666128 9 11 00 9d 00 00 00 00 00 00
+ * E: 000042.672458 9 11 00 80 ff 00 00 00 00 00
+ * E: 000042.696106 9 11 00 dc ff 00 00 00 00 00
+ * E: 000042.705129 9 11 00 61 00 00 00 00 00 00
+ * E: 000042.731303 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.741278 9 11 00 ab ff 00 00 00 00 00
+ * E: 000042.788181 9 11 00 ee ff 00 00 00 00 00
+ * E: 000042.810441 9 11 00 db ff 00 00 00 00 00
+ * E: 000042.838073 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.852235 9 11 00 c4 ff 00 00 00 00 00
+ * E: 000042.882290 9 11 00 e4 ff 00 00 00 00 00
+ *
+ * Either wheel button, press, hold, then release
+ * E: 000202.084982 9 11 02 00 00 00 00 00 00 00
+ * E: 000202.090172 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.094139 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.099172 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.105055 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.109132 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.114185 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.119212 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.124264 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.130147 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.135138 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.140072 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.145146 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.150157 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.155339 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.160064 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.165026 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.170037 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.175154 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.180044 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.186280 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.191281 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.196106 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.201083 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.206166 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.211084 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.216175 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.221036 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.226271 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.231150 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.235924 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.242046 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.247164 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.252359 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.257295 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.262167 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.267081 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.272175 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.277085 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.282596 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.287078 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.292191 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.298196 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.303004 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.308113 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.313079 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.318243 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.323309 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.328190 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.333050 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.338162 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.343022 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.348113 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.354133 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.359132 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.364053 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.369034 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.374144 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.379027 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.384238 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.389249 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.394049 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.398949 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.404203 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.410098 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.415237 9 11 00 00 00 00 00 00 00 00
+ *
+ *
+ * Top wheel button press and release while holding bottom wheel button
+ * (The reverse case, a bottom wheel button press while holding the top wheel button, is not visible in the reports at all.)
+ * E: 000071.126966 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.133117 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.137481 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.142036 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.147027 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.151988 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.157945 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.163657 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.168240 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.173109 9 11 02 00 00 00 00 00 00 00 < top wheel button press?
+ * E: 000071.178119 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.183046 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.187983 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.192996 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.198341 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.203122 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.208998 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.214037 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.218945 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.223835 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.228987 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.234082 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.239028 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.244307 9 11 00 00 00 00 00 00 00 00 < top wheel button release?
+ * E: 000071.245867 9 11 03 00 00 00 00 00 00 00 < continued hold of bottom button
+ * E: 000071.249959 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.255032 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.259972 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.265409 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.270156 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.275530 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.279975 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.285046 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.290906 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.296146 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.301288 9 11 03 00 00 00 00 00 00 00
+ *
+ * Top wheel button hold while top wheel rotate CCW
+ * (I did not test the other combinations of this)
+ * E: 000022.253144 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.258157 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.262011 9 11 00 ff ff 00 00 00 00 00
+ * E: 000022.264015 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.268976 9 11 03 00 00 00 00 00 00 00
+ *
+ *
+ * NOTES ON SIMULTANEOUS BUTTON HOLDS
+ * (applies to vendor mode only)
+ * Value replacements for ease of reading:
+ * .7 = 0x40 (button 7, a wheel button)
+ * .1 = 0x01 (button 1, a pad button)
+ * rr = 0x00 (no buttons pressed)
+ *
+ * Press 7
+ * Press 1
+ * Release 7
+ * Release 1
+ * B: 000000.000152 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000000.781784 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000000.869845 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.095688 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.322635 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.543643 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.770652 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.885659 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 7
+ * B: 000001.993620 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.220671 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.446589 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.672559 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.765183 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1
+ *
+ * Press 7
+ * Press 1
+ * Release 1
+ * Release 7
+ * B: 000017.071517 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000018.270461 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.419486 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.646438 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.872493 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000019.094422 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000019.320488 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000020.360505 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1 is not reported until 7 is released, then both are rapidly reported
+ * B: 000020.361091 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00
+ *
+ * Press 1
+ * Press 7
+ * Release 7
+ * Release 1
+ * B: 000031.516315 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000031.922299 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.144165 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.370262 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.396242 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000032.597270 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.818187 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.045143 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.267535 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 7
+ * B: 000033.272602 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.494246 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.721266 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.947237 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000034.169294 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000034.183585 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1
+ *
+ * Press 1
+ * Press 7
+ * Release 1
+ * Release 7
+ * B: 000056.628429 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.046348 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.272044 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.494434 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.601224 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000057.719262 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.946941 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000058.172346 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000058.393994 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000059.434576 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1 is not reported until 7 is released, then both are rapidly reported
+ * B: 000059.435857 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00
+ */
+
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+char EXPECTED_FIRMWARE_ID[] = "HUION_M22c_";
+
+__u8 last_button_state;
+
+static const __u8 disabled_rdesc_tablet[] = {
+ FixedSizeVendorReport(28) /* Input report 4 */
+};
+
+static const __u8 disabled_rdesc_wheel[] = {
+ FixedSizeVendorReport(9) /* Input report 17 */
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionPhysical(
+ /*
+ * I have only examined the tablet's behavior while using
+ * the PW600L pen, which does not have an eraser.
+ * Because of this, I don't know where the Eraser and Invert
+ * bits will go, or if they work as one would expect.
+ *
+ * For the time being, there is no expectation that a pen
+ * with an eraser will work without modifications here.
+ */
+ ReportSize(1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(3)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ Input(Var|Abs)
+ PushPop(
+ ReportCount(1)
+ UsagePage_Button
+ Usage_i8(0x4a) /* (BTN_STYLUS3 + 1) & 0xff */
+ Input(Var|Abs)
+ )
+ ReportCount(3)
+ Input(Const)
+ ReportCount(1)
+ Usage_Dig_InRange
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-2)
+ LogicalMinimum_i16(0)
+ PhysicalMinimum_i16(0)
+ /*
+ * The tablet has a logical maximum of 58760 x 33040
+ * and a claimed resolution of 5080 LPI (200 L/mm)
+ * This works out to a physical maximum of
+ * 293.8 x 165.2mm, which matches Huion's advertised
+ * active area dimensions from
+ * https://www.huion.com/products/pen_display/Kamvas/kamvas-13-gen-3.html
+ */
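+ /*
+ * Spelling the conversion out (nothing vendor specific, just
+ * the arithmetic): 5080 LPI / 25.4 = 200 lines per mm, so
+ * 58760 / 200 = 293.8 mm and 33040 / 200 = 165.2 mm, encoded
+ * below as 2938 and 1652 because of Unit(cm) and
+ * UnitExponent(-2).
+ */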
+ LogicalMaximum_i16(58760)
+ PhysicalMaximum_i16(2938)
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalMaximum_i16(33040)
+ PhysicalMaximum_i16(1652)
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(16383)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ ReportCount(1)
+ Input(Const)
+ ReportSize(8)
+ ReportCount(2)
+ PushPop(
+ Unit(deg)
+ UnitExponent(0)
+ LogicalMinimum_i8(-60)
+ PhysicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
+ PhysicalMaximum_i8(60)
+ Usage_Dig_XTilt
+ Usage_Dig_YTilt
+ Input(Var|Abs)
+ )
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(CUSTOM_PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /*
+ * The first 3 bytes are somewhat vestigial and will
+ * always be set to zero. Their presence here is needed
+ * to ensure that this device will be detected as a
+ * tablet pad by software that otherwise wouldn't know
+ * any better.
+ */
+ /* (data[1] & 0x01) barrel switch */
+ ReportSize(1)
+ ReportCount(1)
+ Usage_Dig_BarrelSwitch
+ Input(Var|Abs)
+ ReportCount(7)
+ Input(Const)
+ /* data[2] X */
+ /* data[3] Y */
+ ReportSize(8)
+ ReportCount(2)
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ Input(Var|Abs)
+ /*
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6 (top wheel button)
+ * (data[4] & 0x40) button 7 (bottom wheel button)
+ */
+ ReportSize(1)
+ ReportCount(7)
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(7)
+ Input(Var|Abs)
+ ReportCount(1)
+ Input(Const)
+ /* data[5] top wheel (signed, positive clockwise) */
+ ReportSize(8)
+ ReportCount(1)
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ Input(Var|Rel)
+ /* data[6] bottom wheel (signed, positive clockwise) */
+ UsagePage_Consumer
+ Usage_Con_ACPan
+ Input(Var|Rel)
+ )
+ /*
+ * The kernel will drop reports that are bigger than the
+ * largest report specified in the HID descriptor.
+ * Therefore, our modified descriptor needs to have at least one
+ * HID report that is as long as, or longer than, the largest
+ * report in the original descriptor.
+ *
+ * This macro expands to a no-op report that is padded to the
+ * provided length.
+ */
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc_huion_kamvas13_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hid_ctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+
+ if (have_fw_id) {
+ /*
+ * Tablet should be in vendor mode.
+ * Disable the unused devices
+ */
+ if (rdesc_size == TABLET_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_tablet,
+ sizeof(disabled_rdesc_tablet));
+ return sizeof(disabled_rdesc_tablet);
+ }
+
+ if (rdesc_size == WHEEL_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_wheel,
+ sizeof(disabled_rdesc_wheel));
+ return sizeof(disabled_rdesc_wheel);
+ }
+ }
+
+ /*
+ * Regardless of which mode the tablet is in, always fix the vendor
+ * descriptor in case the udev property just happened to not be set
+ */
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(hid_fix_event_huion_kamvas13_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, VENDOR_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Handle vendor reports only */
+ if (hid_ctx->size != VENDOR_REPORT_LENGTH)
+ return 0;
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
+ __u8 report_subtype = (data[1] >> 4) & 0x0f;
+
+ if (report_subtype == VENDOR_REPORT_SUBTYPE_PEN ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_PEN_OUT) {
+ /* Invert Y tilt */
+ data[11] = -data[11];
+
+ } else if (report_subtype == VENDOR_REPORT_SUBTYPE_BUTTONS ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_WHEELS) {
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 padding:7;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __s8 top_wheel;
+ __s8 bottom_wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __s8 top_wheel = 0;
+ __s8 bottom_wheel = 0;
+
+ switch (report_subtype) {
+ case VENDOR_REPORT_SUBTYPE_WHEELS:
+ /*
+ * data[3] selects which wheel moved: 1 for the top wheel,
+ * 2 for the bottom wheel.
+ * The direction byte, data[5], is 1 for clockwise rotation
+ * and 2 for counter-clockwise.
+ * Change it to 1 and -1, respectively.
+ */
+ switch (data[3]) {
+ case 1:
+ top_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ case 2:
+ bottom_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ }
+ break;
+
+ case VENDOR_REPORT_SUBTYPE_BUTTONS:
+ /*
+ * If a button is already being held, ignore any new
+ * button event unless it's a release.
+ *
+ * The tablet only cleanly handles one button being held
+ * at a time, and trying to hold multiple buttons
+ * (particularly wheel+pad buttons) can result in sequences
+ * of reports that look like imaginary presses and releases.
+ *
+ * This is an imperfect way to filter out some of these
+ * reports.
+ */
+ if (last_button_state != 0x00 && data[4] != 0x00)
+ break;
+
+ last_button_state = data[4];
+ break;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = CUSTOM_PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->top_wheel = top_wheel;
+ pad_report->bottom_wheel = bottom_wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(huion_kamvas13_gen3) = {
+ .hid_device_event = (void *)hid_fix_event_huion_kamvas13_gen3,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_huion_kamvas13_gen3,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case VENDOR_DESCRIPTOR_LENGTH:
+ case TABLET_DESCRIPTOR_LENGTH:
+ case WHEEL_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c
new file mode 100644
index 000000000000..ac66c6e65eb4
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Nicholas LaPointe
+ * Copyright (c) 2025 Higgins Dragon
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256c
+#define PID_KAMVAS16_GEN3 0x2009
+
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define TABLET_DESCRIPTOR_LENGTH 328
+#define WHEEL_DESCRIPTOR_LENGTH 200
+
+#define VENDOR_REPORT_ID 8
+#define VENDOR_REPORT_LENGTH 14
+
+#define VENDOR_REPORT_SUBTYPE_PEN 0x08
+#define VENDOR_REPORT_SUBTYPE_PEN_OUT 0x00
+#define VENDOR_REPORT_SUBTYPE_BUTTONS 0x0e
+#define VENDOR_REPORT_SUBTYPE_WHEELS 0x0f
+
+/* For the reports that we create ourselves */
+#define CUSTOM_PAD_REPORT_ID 9
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_HUION, PID_KAMVAS16_GEN3),
+);
+
+/*
+ * This tablet can send reports using one of two different data formats,
+ * depending on what "mode" the tablet is in.
+ *
+ * By default, the tablet will send reports that can be decoded using its
+ * included HID descriptors (descriptors 1 and 2, shown below).
+ * This mode will be called "firmware mode" throughout this file.
+ *
+ * The HID descriptor that describes pen events in firmware mode (descriptor 1)
+ * has multiple bugs:
+ * * "Secondary Tip Switch" instead of "Secondary Barrel Switch"
+ * * "Invert" instead of (or potentially shared with) third barrel button
+ *  * Specified tablet area of 2048 in³ instead of 349.6 x 196.7mm
+ * * Specified tilt range of -90 to +90 instead of -60 to +60
+ *
+ * While these can be easily patched up by editing the descriptor, a larger
+ * problem with the firmware mode exists: it is impossible to tell which of the
+ * two wheels are being rotated (or having their central button pressed).
+ *
+ *
+ * By using a tool such as huion-switcher (https://github.com/whot/huion-switcher),
+ * the tablet can be made to send reports using a proprietary format that is not
+ * adequately described by its relevant descriptor (descriptor 0, shown below).
+ * This mode will be called "vendor mode" throughout this file.
+ *
+ * The reports sent while in vendor mode allow for proper decoding of the wheels.
+ *
+ * For simplicity and maximum functionality, this BPF focuses strictly on
+ * enabling one to make use of the vendor mode.
+ */
+
+/*
+ * DESCRIPTORS
+ * DESCRIPTOR 0
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x68, // Report Size (104) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 18
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 21
+ * # 0xa1, 0x01, // Collection (Application) 23
+ * # 0x85, 0x16, // Report ID (22) 25
+ * # 0x75, 0x08, // Report Size (8) 27
+ * # 0x95, 0x07, // Report Count (7) 29
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 31
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 33
+ * # 0xc0, // End Collection 35
+ * #
+ * R: 36 06 00 ff 09 01 a1 01 85 08 75 68 95 01 09 01 81 02 c0 06 00 ff 09 01 a1 01 85 16 75 08 95 07 09 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ *
+ * DESCRIPTOR 1
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x43, // Usage (Secondary Tip Switch) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x45, // Usage (Eraser) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x06, // Report Count (6) 28
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x09, 0x32, // Usage (In Range) 32
+ * # 0x75, 0x01, // Report Size (1) 34
+ * # 0x95, 0x01, // Report Count (1) 36
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x30, // Usage (X) 44
+ * # 0x09, 0x31, // Usage (Y) 46
+ * # 0x55, 0x0d, // Unit Exponent (-3) 48
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 50
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 52
+ * # 0x35, 0x00, // Physical Minimum (0) 55
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x02, // Report Count (2) 62
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 64
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 66
+ * # 0x09, 0x30, // Usage (Tip Pressure) 68
+ * # 0x26, 0xff, 0x3f, // Logical Maximum (16383) 70
+ * # 0x75, 0x10, // Report Size (16) 73
+ * # 0x95, 0x01, // Report Count (1) 75
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 77
+ * # 0x09, 0x3d, // Usage (X Tilt) 79
+ * # 0x09, 0x3e, // Usage (Y Tilt) 81
+ * # 0x15, 0xa6, // Logical Minimum (-90) 83
+ * # 0x25, 0x5a, // Logical Maximum (90) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x02, // Report Count (2) 89
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 91
+ * # 0xc0, // End Collection 93
+ * # 0xc0, // End Collection 94
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 95
+ * # 0x09, 0x04, // Usage (Touch Screen) 97
+ * # 0xa1, 0x01, // Collection (Application) 99
+ * # 0x85, 0x04, // Report ID (4) 101
+ * # 0x09, 0x22, // Usage (Finger) 103
+ * # 0xa1, 0x02, // Collection (Logical) 105
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 107
+ * # 0x95, 0x01, // Report Count (1) 109
+ * # 0x75, 0x06, // Report Size (6) 111
+ * # 0x09, 0x51, // Usage (Contact Id) 113
+ * # 0x15, 0x00, // Logical Minimum (0) 115
+ * # 0x25, 0x3f, // Logical Maximum (63) 117
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 119
+ * # 0x09, 0x42, // Usage (Tip Switch) 121
+ * # 0x25, 0x01, // Logical Maximum (1) 123
+ * # 0x75, 0x01, // Report Size (1) 125
+ * # 0x95, 0x01, // Report Count (1) 127
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 129
+ * # 0x75, 0x01, // Report Size (1) 131
+ * # 0x95, 0x01, // Report Count (1) 133
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 135
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 137
+ * # 0x75, 0x10, // Report Size (16) 139
+ * # 0x55, 0x0e, // Unit Exponent (-2) 141
+ * # 0x65, 0x11, // Unit (SILinear: cm) 143
+ * # 0x09, 0x30, // Usage (X) 145
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 147
+ * # 0x35, 0x00, // Physical Minimum (0) 150
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 152
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 155
+ * # 0x09, 0x31, // Usage (Y) 157
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 159
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 162
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 165
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 167
+ * # 0x09, 0x30, // Usage (Tip Pressure) 169
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 171
+ * # 0x75, 0x10, // Report Size (16) 174
+ * # 0x95, 0x01, // Report Count (1) 176
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 178
+ * # 0xc0, // End Collection 180
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 181
+ * # 0x09, 0x22, // Usage (Finger) 183
+ * # 0xa1, 0x02, // Collection (Logical) 185
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 187
+ * # 0x95, 0x01, // Report Count (1) 189
+ * # 0x75, 0x06, // Report Size (6) 191
+ * # 0x09, 0x51, // Usage (Contact Id) 193
+ * # 0x15, 0x00, // Logical Minimum (0) 195
+ * # 0x25, 0x3f, // Logical Maximum (63) 197
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 199
+ * # 0x09, 0x42, // Usage (Tip Switch) 201
+ * # 0x25, 0x01, // Logical Maximum (1) 203
+ * # 0x75, 0x01, // Report Size (1) 205
+ * # 0x95, 0x01, // Report Count (1) 207
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 209
+ * # 0x75, 0x01, // Report Size (1) 211
+ * # 0x95, 0x01, // Report Count (1) 213
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 215
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 217
+ * # 0x75, 0x10, // Report Size (16) 219
+ * # 0x55, 0x0e, // Unit Exponent (-2) 221
+ * # 0x65, 0x11, // Unit (SILinear: cm) 223
+ * # 0x09, 0x30, // Usage (X) 225
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 227
+ * # 0x35, 0x00, // Physical Minimum (0) 230
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 232
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 235
+ * # 0x09, 0x31, // Usage (Y) 237
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 239
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 242
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 245
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 247
+ * # 0x09, 0x30, // Usage (Tip Pressure) 249
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 251
+ * # 0x75, 0x10, // Report Size (16) 254
+ * # 0x95, 0x01, // Report Count (1) 256
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 258
+ * # 0xc0, // End Collection 260
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 261
+ * # 0x09, 0x56, // Usage (Scan Time) 263
+ * # 0x55, 0x00, // Unit Exponent (0) 265
+ * # 0x65, 0x00, // Unit (None) 267
+ * # 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647) 269
+ * # 0x95, 0x01, // Report Count (1) 274
+ * # 0x75, 0x20, // Report Size (32) 276
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 278
+ * # 0x09, 0x54, // Usage (Contact Count) 280
+ * # 0x25, 0x7f, // Logical Maximum (127) 282
+ * # 0x95, 0x01, // Report Count (1) 284
+ * # 0x75, 0x08, // Report Size (8) 286
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 288
+ * # 0x75, 0x08, // Report Size (8) 290
+ * # 0x95, 0x08, // Report Count (8) 292
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 294
+ * # 0x85, 0x05, // Report ID (5) 296
+ * # 0x09, 0x55, // Usage (Contact Max) 298
+ * # 0x25, 0x0a, // Logical Maximum (10) 300
+ * # 0x75, 0x08, // Report Size (8) 302
+ * # 0x95, 0x01, // Report Count (1) 304
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 306
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 308
+ * # 0x09, 0xc5, // Usage (Vendor Usage 0xc5) 311
+ * # 0x85, 0x06, // Report ID (6) 313
+ * # 0x15, 0x00, // Logical Minimum (0) 315
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 317
+ * # 0x75, 0x08, // Report Size (8) 320
+ * # 0x96, 0x00, 0x01, // Report Count (256) 322
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 325
+ * # 0xc0, // End Collection 327
+ * #
+ * R: 328 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 43 09 3c 09 45 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 3f 75 10 95 01 81 02 09 3d 09 3e 15 a6 25 5a 75 08 95 02 81 02 c0 c0 05 0d 09 04 a1 01 85 04 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 56 55 00 65 00 27 ff ff ff 7f 95 01 75 20 81 02 09 54 25 7f 95 01 75 08 81 02 75 08 95 08 81 03 85 05 09 55 25 0a 75 08 95 01 b1 02 06 00 ff 09 c5 85 06 15 00 26 ff 00 75 08 96 00 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ * DESCRIPTOR 2
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 108
+ * # 0x09, 0x06, // Usage (Keyboard) 110
+ * # 0xa1, 0x01, // Collection (Application) 112
+ * # 0x85, 0x03, // Report ID (3) 114
+ * # 0x05, 0x07, // Usage Page (Keyboard) 116
+ * # 0x19, 0xe0, // Usage Minimum (224) 118
+ * # 0x29, 0xe7, // Usage Maximum (231) 120
+ * # 0x15, 0x00, // Logical Minimum (0) 122
+ * # 0x25, 0x01, // Logical Maximum (1) 124
+ * # 0x75, 0x01, // Report Size (1) 126
+ * # 0x95, 0x08, // Report Count (8) 128
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 130
+ * # 0x05, 0x07, // Usage Page (Keyboard) 132
+ * # 0x19, 0x00, // Usage Minimum (0) 134
+ * # 0x29, 0xff, // Usage Maximum (255) 136
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 138
+ * # 0x75, 0x08, // Report Size (8) 141
+ * # 0x95, 0x06, // Report Count (6) 143
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 145
+ * # 0xc0, // End Collection 147
+ * # 0x05, 0x0c, // Usage Page (Consumer Devices) 148
+ * # 0x09, 0x01, // Usage (Consumer Control) 150
+ * # 0xa1, 0x01, // Collection (Application) 152
+ * # 0x85, 0x04, // Report ID (4) 154
+ * # 0x19, 0x01, // Usage Minimum (1) 156
+ * # 0x2a, 0x9c, 0x02, // Usage Maximum (668) 158
+ * # 0x15, 0x01, // Logical Minimum (1) 161
+ * # 0x26, 0x9c, 0x02, // Logical Maximum (668) 163
+ * # 0x95, 0x01, // Report Count (1) 166
+ * # 0x75, 0x10, // Report Size (16) 168
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 170
+ * # 0xc0, // End Collection 172
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 173
+ * # 0x09, 0x80, // Usage (System Control) 175
+ * # 0xa1, 0x01, // Collection (Application) 177
+ * # 0x85, 0x05, // Report ID (5) 179
+ * # 0x19, 0x81, // Usage Minimum (129) 181
+ * # 0x29, 0x83, // Usage Maximum (131) 183
+ * # 0x15, 0x00, // Logical Minimum (0) 185
+ * # 0x25, 0x01, // Logical Maximum (1) 187
+ * # 0x75, 0x01, // Report Size (1) 189
+ * # 0x95, 0x03, // Report Count (3) 191
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 193
+ * # 0x95, 0x05, // Report Count (5) 195
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 197
+ * # 0xc0, // End Collection 199
+ * #
+ * R: 200 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 19 01 2a 9c 02 15 01 26 9c 02 95 01 75 10 81 00 c0 05 01 09 80 a1 01 85 05 19 81 29 83 15 00 25 01 75 01 95 03 81 02 95 05 81 01 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ *
+ *
+ * VENDOR MODE
+ * HUION_FIRMWARE_ID="HUION_M22d_241101"
+ * HUION_MAGIC_BYTES="1403201101ac9900ff3fd81305080080083c4010"
+ *
+ * MAGIC BYTES
+ * [LogicalMaximum, X ] [LogicalMaximum, Y ] [LogicalMaximum, Pressure] [ LPI]
+ * 14 03 [ 20 11 01] [ ac 99 00] [ ff 3f] [d8 13] 05 08 00 80 08 3c 40 10
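+ * Decoded little-endian (a worked example of the bracketed fields above;
+ * the values match those used in fixed_rdesc_vendor below):
+ *   X max:        0x011120 = 69920     Y max:      0x0099ac = 39340
+ *   Pressure max: 0x3fff   = 16383     Resolution: 0x13d8   = 5080 LPI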
+ *
+ * See Huion__Kamvas13Gen3.bpf.c for detailed notes on the button/dial
+ * reports and their caveats; the Kamvas 13 Gen 3 behaves very similarly
+ * to the Kamvas 16 Gen 3 in this regard.
+ */
+
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+char EXPECTED_FIRMWARE_ID[] = "HUION_M22d_";
+
+__u8 last_button_state;
+
+static const __u8 disabled_rdesc_tablet[] = {
+ FixedSizeVendorReport(28) /* Input report 4 */
+};
+
+static const __u8 disabled_rdesc_wheel[] = {
+ FixedSizeVendorReport(9) /* Input report 17 */
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionPhysical(
+ /*
+ * I have only examined the tablet's behavior while using
+ * the PW600L pen, which does not have an eraser.
+ * Because of this, I don't know where the Eraser and Invert
+ * bits will go, or if they work as one would expect.
+ *
+ * For the time being, there is no expectation that a pen
+ * with an eraser will work without modifications here.
+ */
+ ReportSize(1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(3)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ Input(Var|Abs)
+ PushPop(
+ ReportCount(1)
+ UsagePage_Button
+ Usage_i8(0x4a) /* (BTN_STYLUS3 + 1) & 0xff */
+ Input(Var|Abs)
+ )
+ ReportCount(3)
+ Input(Const)
+ ReportCount(1)
+ Usage_Dig_InRange
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-2)
+ LogicalMinimum_i16(0)
+ PhysicalMinimum_i16(0)
+ /*
+ * The tablet has a logical maximum of 69920 x 39340
+ * and a claimed resolution of 5080 LPI (200 L/mm)
+ * This works out to a physical maximum of
+ * 349.6 x 196.7mm, which matches Huion's advertised
+ * (rounded) active area dimensions from
+ * https://www.huion.com/products/pen_display/Kamvas/kamvas-16-gen-3.html
+ *
+ * The Kamvas uses data[8] for the 3rd byte of the X-axis, and adding
+ * that after data[2] and data[3] makes a contiguous little-endian
+ * 24-bit value. (See BPF_PROG below)
+ */
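+ /*
+ * For reference: 5080 LPI / 25.4 = 200 lines per mm, so
+ * 69920 / 200 = 349.6 mm and 39340 / 200 = 196.7 mm, encoded
+ * below as 3496 and 1967 because of Unit(cm) and UnitExponent(-2).
+ */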
+ ReportSize(24)
+ LogicalMaximum_i32(69920)
+ PhysicalMaximum_i16(3496)
+ Usage_GD_X
+ Input(Var|Abs)
+ ReportSize(16)
+ LogicalMaximum_i16(39340)
+ PhysicalMaximum_i16(1967)
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ ReportSize(16)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(16383)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ ReportSize(8)
+ ReportCount(1)
+ Input(Const)
+ ReportCount(2)
+ PushPop(
+ Unit(deg)
+ UnitExponent(0)
+ LogicalMinimum_i8(-60)
+ PhysicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
+ PhysicalMaximum_i8(60)
+ Usage_Dig_XTilt
+ Usage_Dig_YTilt
+ Input(Var|Abs)
+ )
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(CUSTOM_PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /*
+ * The first 3 bytes are somewhat vestigial and will
+ * always be set to zero. Their presence here is needed
+ * to ensure that this device will be detected as a
+ * tablet pad by software that otherwise wouldn't know
+ * any better.
+ */
+ /* (data[1] & 0x01) barrel switch */
+ ReportSize(1)
+ ReportCount(1)
+ Usage_Dig_BarrelSwitch
+ Input(Var|Abs)
+ ReportCount(7)
+ Input(Const)
+ /* data[2] X */
+ /* data[3] Y */
+ ReportSize(8)
+ ReportCount(2)
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ Input(Var|Abs)
+ /*
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6
+ * (data[4] & 0x40) button 7 (top wheel button)
+ * (data[4] & 0x80) button 8 (bottom wheel button)
+ */
+ ReportSize(1)
+ ReportCount(8)
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(8)
+ Input(Var|Abs)
+ /* data[5] top wheel (signed, positive clockwise) */
+ ReportSize(8)
+ ReportCount(1)
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ Input(Var|Rel)
+ /* data[6] bottom wheel (signed, positive clockwise) */
+ UsagePage_Consumer
+ Usage_Con_ACPan
+ Input(Var|Rel)
+ )
+ /*
+ * The kernel will drop reports that are bigger than the
+ * largest report specified in the HID descriptor.
+ * Therefore, our modified descriptor needs to have at least one
+ * HID report that is as long as, or longer than, the largest
+ * report in the original descriptor.
+ *
+ * This macro expands to a no-op report that is padded to the
+ * provided length.
+ */
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc_huion_kamvas16_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hid_ctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+
+ if (have_fw_id) {
+ /*
+ * Tablet should be in vendor mode.
+ * Disable the unused devices
+ */
+ if (rdesc_size == TABLET_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_tablet,
+ sizeof(disabled_rdesc_tablet));
+ return sizeof(disabled_rdesc_tablet);
+ }
+
+ if (rdesc_size == WHEEL_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_wheel,
+ sizeof(disabled_rdesc_wheel));
+ return sizeof(disabled_rdesc_wheel);
+ }
+ }
+
+ /*
+ * Regardless of which mode the tablet is in, always fix the vendor
+ * descriptor in case the udev property just happened to not be set
+ */
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(hid_fix_event_huion_kamvas16_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, VENDOR_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Handle vendor reports only */
+ if (hid_ctx->size != VENDOR_REPORT_LENGTH)
+ return 0;
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
+ __u8 report_subtype = (data[1] >> 4) & 0x0f;
+
+ if (report_subtype == VENDOR_REPORT_SUBTYPE_PEN ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_PEN_OUT) {
+ /* Invert Y tilt */
+ data[11] = -data[11];
+
+ /*
+ * Rearrange the bytes of the report so that
+ * [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
+ * will be arranged as
+ * [0, 1, 2, 3, 8, 4, 5, 6, 7, 9, 10, 11, 12, 13]
+ */
+ __u8 x_24 = data[8];
+
+ data[8] = data[7];
+ data[7] = data[6];
+ data[6] = data[5];
+ data[5] = data[4];
+
+ data[4] = x_24;
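+ /*
+ * After these assignments the report matches the layout in
+ * fixed_rdesc_vendor above: X is a 24-bit little-endian value
+ * in data[2..4], Y sits in data[5..6], pressure in data[7..8]
+ * and the tilt pair stays in data[10..11].
+ */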
+
+ } else if (report_subtype == VENDOR_REPORT_SUBTYPE_BUTTONS ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_WHEELS) {
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 padding:7;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __s8 top_wheel;
+ __s8 bottom_wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __s8 top_wheel = 0;
+ __s8 bottom_wheel = 0;
+
+ switch (report_subtype) {
+ case VENDOR_REPORT_SUBTYPE_WHEELS:
+ /*
+ * data[3] selects which wheel moved: 1 for the top wheel,
+ * 2 for the bottom wheel.
+ * The direction byte, data[5], is 1 for clockwise rotation
+ * and 2 for counter-clockwise.
+ * Change it to 1 and -1, respectively.
+ */
+ switch (data[3]) {
+ case 1:
+ top_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ case 2:
+ bottom_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ }
+ break;
+
+ case VENDOR_REPORT_SUBTYPE_BUTTONS:
+ /*
+ * If a button is already being held, ignore any new
+ * button event unless it's a release.
+ *
+ * The tablet only cleanly handles one button being held
+ * at a time, and trying to hold multiple buttons
+ * (particularly wheel+pad buttons) can result in sequences
+ * of reports that look like imaginary presses and releases.
+ *
+ * This is an imperfect way to filter out some of these
+ * reports.
+ */
+ if (last_button_state != 0x00 && data[4] != 0x00)
+ break;
+
+ last_button_state = data[4];
+ break;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = CUSTOM_PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->top_wheel = top_wheel;
+ pad_report->bottom_wheel = bottom_wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(huion_kamvas16_gen3) = {
+ .hid_device_event = (void *)hid_fix_event_huion_kamvas16_gen3,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_huion_kamvas16_gen3,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case VENDOR_DESCRIPTOR_LENGTH:
+ case TABLET_DESCRIPTOR_LENGTH:
+ case WHEEL_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c b/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c
new file mode 100644
index 000000000000..b17719d6d9c7
--- /dev/null
+++ b/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Curran Muhlberger
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_LOGITECH 0x046D
+#define PID_SPACENAVIGATOR 0xC626
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_LOGITECH, PID_SPACENAVIGATOR)
+);
+
+/*
+ * The 3Dconnexion SpaceNavigator 3D Mouse is a multi-axis controller with 6
+ * axes (grouped as X,Y,Z and Rx,Ry,Rz). Axis data is absolute, but the report
+ * descriptor erroneously declares it to be relative. We fix the report
+ * descriptor to mark both axis collections as absolute.
+ *
+ * The kernel attempted to fix this in commit 24985cf68612 ("HID: support
+ * Logitech/3DConnexion SpaceTraveler and SpaceNavigator"), but the
+ * descriptor offsets it patches are incorrect for at least some
+ * SpaceNavigator units.
+ */
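+
+/*
+ * Concretely, the fixup below flips one bit per axis collection: in the data
+ * byte of a HID Input item, bit 2 selects Relative (1) vs Absolute (0), so
+ *
+ *   0x81, 0x06   Input (Data,Var,Rel)   <- as sent by the device
+ *   0x81, 0x02   Input (Data,Var,Abs)   <- after the fixup
+ *
+ * (This just spells out the HID item encoding; the offsets at which these
+ * Input items live are handled in hid_fix_rdesc below.)
+ */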
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Offset of Input item in X,Y,Z and Rx,Ry,Rz collections for all known
+ * firmware variants.
+ * - 2009 model: X,Y,Z @ 32-33, Rx,Ry,Rz @ 49-50 (fixup originally
+ * applied in kernel)
+ * - 2016 model (size==228): X,Y,Z @ 36-37, Rx,Ry,Rz @ 53-54
+ *
+ * The descriptor size of the 2009 model is not known, and there is evidence
+ * for at least two other variants (with sizes 202 & 217) besides the 2016
+ * model, so we try all known offsets regardless of descriptor size.
+ */
+ const u8 offsets[] = {32, 36, 49, 53};
+
+ for (size_t idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
+ u8 offset = offsets[idx];
+
+ /* if Input (Data,Var,Rel), make it Input (Data,Var,Abs) */
+ if (data[offset] == 0x81 && data[offset + 1] == 0x06)
+ data[offset + 1] = 0x02;
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(logitech_spacenavigator) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /* Ensure report descriptor size matches one of the known variants. */
+ if (ctx->rdesc_size != 202 &&
+ ctx->rdesc_size != 217 &&
+ ctx->rdesc_size != 228) {
+ ctx->retval = -EINVAL;
+ return 0;
+ }
+
+ /* Check whether the kernel has already applied the fix. */
+ if ((ctx->rdesc[32] == 0x81 && ctx->rdesc[33] == 0x02 &&
+ ctx->rdesc[49] == 0x81 && ctx->rdesc[50] == 0x02) ||
+ (ctx->rdesc[36] == 0x81 && ctx->rdesc[37] == 0x02 &&
+ ctx->rdesc[53] == 0x81 && ctx->rdesc[54] == 0x02))
+ ctx->retval = -EINVAL;
+ else
+ ctx->retval = 0;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c b/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c
new file mode 100644
index 000000000000..156d75af516d
--- /dev/null
+++ b/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Red Hat
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_WALTOP 0x172F
+#define PID_BATTERYLESS_TABLET 0x0505
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_WALTOP, PID_BATTERYLESS_TABLET)
+);
+
+#define EXPECTED_RDESC_SIZE 335
+#define PEN_REPORT_ID 16
+
+#define TIP_SWITCH BIT(0)
+#define BARREL_SWITCH BIT(1)
+#define SECONDARY_BARREL_SWITCH BIT(5)
+
+static __u8 last_button_state;
+
+static const __u8 fixed_rdesc[] = {
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x02, // Usage (Mouse)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x01, // Report ID (1)
+ 0x09, 0x01, // Usage (Pointer)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x05, 0x09, // Usage Page (Button)
+ 0x19, 0x01, // Usage Minimum (1)
+ 0x29, 0x05, // Usage Maximum (5)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x05, // Report Count (5)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x75, 0x03, // Report Size (3)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x03, // Input (Cnst,Var,Abs)
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x30, // Usage (X)
+ 0x09, 0x31, // Usage (Y)
+ 0x09, 0x38, // Usage (Wheel)
+ 0x15, 0x81, // Logical Minimum (-127)
+ 0x25, 0x7f, // Logical Maximum (127)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x03, // Report Count (3)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0x05, 0x0c, // Usage Page (Consumer)
+ 0x15, 0x81, // Logical Minimum (-127)
+ 0x25, 0x7f, // Logical Maximum (127)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0x0a, 0x38, 0x02, // Usage (AC Pan)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0xc0, // End Collection
+ 0xc0, // End Collection
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x02, // Usage (Pen)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x02, // Report ID (2)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x09, // Report Count (9)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x09, 0x3f, // Usage (Azimuth)
+ 0x09, 0x40, // Usage (Altitude)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x05, // Report ID (5)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x07, // Report Count (7)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x0a, // Report ID (10)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x07, // Report Count (7)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x10, // Report ID (16)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x42, // Usage (Tip Switch)
+ 0x09, 0x44, // Usage (Barrel Switch)
+ 0x09, 0x3c, // Usage (Invert)
+ 0x09, 0x45, // Usage (Eraser)
+ 0x09, 0x32, // Usage (In Range)
+ 0x09, 0x5a, // Usage (Secondary Barrel Switch) <-- added
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x06, // Report Count (6) <--- changed from 5
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x95, 0x02, // Report Count (2) <--- changed from 3
+ 0x81, 0x03, // Input (Cnst,Var,Abs)
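+ // Resulting bit layout of the first data byte of report 16, with the
+ // usages assigned in the order they are declared above:
+ //   bit 0 Tip Switch, bit 1 Barrel Switch, bit 2 Invert, bit 3 Eraser,
+ //   bit 4 In Range, bit 5 Secondary Barrel Switch, bits 6-7 constant padding
+ // matching the TIP_SWITCH/BARREL_SWITCH/SECONDARY_BARREL_SWITCH defines
+ // near the top of this file.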
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x30, // Usage (X)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x01, // Report Count (1)
+ 0xa4, // Push
+ 0x55, 0x0d, // Unit Exponent (-3)
+ 0x65, 0x33, // Unit (EnglishLinear: in³)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0x00, 0x7d, // Logical Maximum (32000)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0x00, 0x7d, // Physical Maximum (32000)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x09, 0x31, // Usage (Y)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0x20, 0x4e, // Logical Maximum (20000)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0x20, 0x4e, // Physical Maximum (20000)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x30, // Usage (Tip Pressure)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x07, // Logical Maximum (2047)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0xff, 0x07, // Physical Maximum (2047)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x3d, // Usage (X Tilt)
+ 0x09, 0x3e, // Usage (Y Tilt)
+ 0x15, 0xc4, // Logical Minimum (-60) <- changed from -127
+ 0x25, 0x3c, // Logical Maximum (60) <- changed from 127
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0xc0, // End Collection
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x06, // Usage (Keyboard)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x0d, // Report ID (13)
+ 0x05, 0x07, // Usage Page (Keyboard/Keypad)
+ 0x19, 0xe0, // Usage Minimum (224)
+ 0x29, 0xe7, // Usage Maximum (231)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x08, // Report Count (8)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x01, // Input (Cnst,Arr,Abs)
+ 0x05, 0x07, // Usage Page (Keyboard/Keypad)
+ 0x19, 0x00, // Usage Minimum (0)
+ 0x29, 0x65, // Usage Maximum (101)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x65, // Logical Maximum (101)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x05, // Report Count (5)
+ 0x81, 0x00, // Input (Data,Arr,Abs)
+ 0xc0, // End Collection
+ 0x05, 0x0c, // Usage Page (Consumer)
+ 0x09, 0x01, // Usage (Consumer Control)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x0c, // Report ID (12)
+ 0x09, 0xe9, // Usage (Volume Increment)
+ 0x09, 0xea, // Usage (Volume Decrement)
+ 0x09, 0xe2, // Usage (Mute)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x03, // Report Count (3)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0x75, 0x05, // Report Size (5)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x07, // Input (Cnst,Var,Rel)
+ 0xc0, // End Collection
+};
+
+static inline unsigned int bitwidth32(__u32 x)
+{
+ return 32 - __builtin_clzg(x, 32);
+}
+
+static inline unsigned int floor_log2_32(__u32 x)
+{
+ return bitwidth32(x) - 1;
+}
+
+/* Maps the interval [0, 2047] to itself using a scaled
+ * approximation of the function log2(x+1).
+ */
+static unsigned int scaled_log2(__u16 v)
+{
+ const unsigned int XMAX = 2047;
+ const unsigned int YMAX = 11; /* log2(2048) = 11 */
+
+ unsigned int x = v + 1;
+ unsigned int n = floor_log2_32(x);
+ unsigned int b = 1 << n;
+
+ /* Fixed-point fraction in [0, 1), linearly
+ * interpolated using delta-y = 1 and
+ * delta-x = (2b - b) = b.
+ */
+ unsigned int frac = (x - b) << YMAX;
+ unsigned int lerp = frac / b;
+ unsigned int log2 = (n << YMAX) + lerp;
+
+ return ((log2 * XMAX) / YMAX) >> YMAX;
+}
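
The arithmetic above can be sanity-checked with a minimal user-space sketch (standalone, not part of the BPF object; plain unsigned types stand in for the __u* aliases and __builtin_clz() for __builtin_clzg()):

#include <stdio.h>

/* Host-side copy of the fixed-point log2 approximation above. */
static unsigned int host_scaled_log2(unsigned int v)
{
	const unsigned int XMAX = 2047;
	const unsigned int YMAX = 11;		/* log2(2048) = 11 */
	unsigned int x = v + 1;
	unsigned int n = 31 - __builtin_clz(x);	/* floor(log2(x)), valid for x >= 1 */
	unsigned int b = 1u << n;
	unsigned int frac = (x - b) << YMAX;	/* fixed-point fraction in [0, 1) */
	unsigned int lerp = frac / b;
	unsigned int log2 = (n << YMAX) + lerp;

	return ((log2 * XMAX) / YMAX) >> YMAX;
}

int main(void)
{
	/* 0 and 2047 map onto themselves, so the curve stays inside the
	 * device's logical range; 103, the first value handled by the log
	 * branch of the event handler below, lands just above the linear
	 * branch's maximum of 102 * 12 = 1224, so the two curves meet
	 * smoothly around a raw value of ~100.
	 */
	printf("%u %u %u\n", host_scaled_log2(0), host_scaled_log2(103),
	       host_scaled_log2(2047));
	return 0;
}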
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+
+ return sizeof(fixed_rdesc);
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(waltop_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __u8 report_id = data[0];
+
+ if (report_id != PEN_REPORT_ID)
+ return 0;
+
+ /* On this tablet if the secondary barrel switch is pressed,
+ * the tablet sends tip down and barrel down. Change this to
+ * just secondary barrel down when there is no ambiguity.
+ *
+ * It's possible that there is a bug in the firmware and the
+ * device intends to set invert + eraser instead (i.e. the
+ * physical button is an eraser button) but since
+ * the pressure is always zero, said eraser button
+ * would be useless anyway.
+ *
+ * So let's just change the button to secondary barrel down.
+ */
+
+ __u8 tip_switch = data[1] & TIP_SWITCH;
+ __u8 barrel_switch = data[1] & BARREL_SWITCH;
+
+ __u8 tip_held = last_button_state & TIP_SWITCH;
+ __u8 barrel_held = last_button_state & BARREL_SWITCH;
+
+ if (tip_switch && barrel_switch && !tip_held && !barrel_held) {
+ data[1] &= ~(TIP_SWITCH | BARREL_SWITCH); /* release tip and barrel */
+ data[1] |= SECONDARY_BARREL_SWITCH; /* set secondary barrel switch */
+ }
+
+ last_button_state = data[1];
+
+ /* The pressure sensor on this tablet maps around half of the
+ * logical pressure range into the interval [0-100]. Further
+ * pressure causes the sensor value to increase exponentially
+ * up to a maximum value of 2047.
+ *
+ * The values 12 and 102 were chosen to have an integer slope
+ * with smooth transition between the two curves around the
+ * value 100.
+ */
+
+ __u16 pressure = (((__u16)data[6]) << 0) | (((__u16)data[7]) << 8);
+
+ if (pressure <= 102)
+ pressure *= 12;
+ else
+ pressure = scaled_log2(pressure);
+
+ data[6] = pressure >> 0;
+ data[7] = pressure >> 8;
+
+ return 0;
+}
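
A standalone sketch of the button rewrite above; the bit positions are assumptions read off the fixed report descriptor (tip switch in bit 0, barrel switch in bit 1, secondary barrel switch in bit 5), which is what the TIP_SWITCH, BARREL_SWITCH and SECONDARY_BARREL_SWITCH macros are expected to encode:

#include <stdio.h>

/* Assumed bit layout of the button byte, per the fixed descriptor above. */
#define TIP_SWITCH			(1 << 0)
#define BARREL_SWITCH			(1 << 1)
#define SECONDARY_BARREL_SWITCH		(1 << 5)

static unsigned char rewrite_buttons(unsigned char buttons, unsigned char last)
{
	int tip = buttons & TIP_SWITCH;
	int barrel = buttons & BARREL_SWITCH;
	int tip_held = last & TIP_SWITCH;
	int barrel_held = last & BARREL_SWITCH;

	/* Tip + barrel appearing together from an idle state means the
	 * secondary barrel button was pressed, so report only that.
	 */
	if (tip && barrel && !tip_held && !barrel_held)
		buttons = (buttons & ~(TIP_SWITCH | BARREL_SWITCH)) |
			  SECONDARY_BARREL_SWITCH;

	return buttons;
}

int main(void)
{
	/* tip + barrel with nothing previously held: 0x03 -> 0x20 */
	printf("0x%02x\n", rewrite_buttons(0x03, 0x00));
	/* tip alone while the tip was already held stays untouched: 0x01 */
	printf("0x%02x\n", rewrite_buttons(0x01, 0x01));
	return 0;
}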
+
+HID_BPF_OPS(waltop_batteryless) = {
+ .hid_device_event = (void *)waltop_fix_events,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ if (ctx->rdesc_size == EXPECTED_RDESC_SIZE)
+ ctx->retval = 0;
+ else
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c b/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c
new file mode 100644
index 000000000000..2502fcc9ede6
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Red Hat
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and probably others */
+#define PID_DECO_01_V3 0x0947
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_DECO_01_V3),
+);
+
+/*
+ * Default report descriptor reports:
+ * - a report descriptor for the pad buttons, reported as key sequences
+ * - a report descriptor for the pen
+ * - a vendor-specific report descriptor
+ *
+ * The Pad report descriptor, see
+ * https://gitlab.freedesktop.org/libevdev/udev-hid-bpf/-/issues/54
+ *
+ * # Report descriptor length: 102 bytes
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x09, // Report ID (9) 6
+ * 0x09, 0x01, // Usage (Pointer) 8
+ * 0xa1, 0x00, // Collection (Physical) 10
+ * 0x05, 0x09, // Usage Page (Button) 12
+ * 0x19, 0x01, // UsageMinimum (1) 14
+ * 0x29, 0x03, // UsageMaximum (3) 16
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x25, 0x01, // Logical Maximum (1) 20
+ * 0x95, 0x03, // Report Count (3) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x95, 0x05, // Report Count (5) 28
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 30
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 32
+ * 0x09, 0x30, // Usage (X) 34
+ * 0x09, 0x31, // Usage (Y) 36
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 38
+ * 0x95, 0x02, // Report Count (2) 41
+ * 0x75, 0x10, // Report Size (16) 43
+ * 0x81, 0x02, // Input (Data,Var,Abs) 45
+ * 0x05, 0x0d, // Usage Page (Digitizers) 47
+ * 0x09, 0x30, // Usage (Tip Pressure) 49
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 51
+ * 0x95, 0x01, // Report Count (1) 54
+ * 0x75, 0x10, // Report Size (16) 56
+ * 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * 0xc0, // End Collection 60
+ * 0xc0, // End Collection 61
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 62
+ * 0x09, 0x06, // Usage (Keyboard) 64
+ * 0xa1, 0x01, // Collection (Application) 66
+ * 0x85, 0x06, // Report ID (6) 68
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 70
+ * 0x19, 0xe0, // UsageMinimum (224) 72
+ * 0x29, 0xe7, // UsageMaximum (231) 74
+ * 0x15, 0x00, // Logical Minimum (0) 76
+ * 0x25, 0x01, // Logical Maximum (1) 78
+ * 0x75, 0x01, // Report Size (1) 80
+ * 0x95, 0x08, // Report Count (8) 82
+ * 0x81, 0x02, // Input (Data,Var,Abs) 84
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 86
+ * 0x19, 0x00, // UsageMinimum (0) 88
+ * 0x29, 0xff, // UsageMaximum (255) 90
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 92
+ * 0x75, 0x08, // Report Size (8) 95
+ * 0x95, 0x06, // Report Count (6) 97
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 99
+ * 0xc0, // End Collection 101
+ *
+ * And key events for buttons top->bottom are:
+ * Buttons released: 06 00 00 00 00 00 00 00
+ * Button1: 06 00 05 00 00 00 00 00 -> b
+ * Button2: 06 00 08 00 00 00 00 00 -> e
+ * Button3: 06 04 00 00 00 00 00 00 -> LAlt
+ * Button4: 06 00 2c 00 00 00 00 00 -> Space
+ * Button5: 06 01 16 00 00 00 00 00 -> LControl + s
+ * Button6: 06 01 1d 00 00 00 00 00 -> LControl + z
+ * Button7: 06 01 57 00 00 00 00 00 -> LControl + Keypad Plus
+ * Button8: 06 01 56 00 00 00 00 00 -> LControl + Keypad Dash
+ *
+ * When multiple buttons are pressed at the same time, the values used to
+ * identify the buttons are identical, but they appear in different bytes of the
+ * record. For example, when button 2 (0x08) and button 1 (0x05) are pressed,
+ * this is the report:
+ *
+ * Buttons 2 and 1: 06 00 08 05 00 00 00 00 -> e + b
+ *
+ * Buttons 1, 2, 4, 5 and 6 can be matched by finding their values in the
+ * report.
+ *
+ * Button 3 is pressed when the 3rd bit is 1. For example, pressing buttons 3
+ * and 5 generates this report:
+ *
+ * Buttons 3 and 5: 06 05 16 00 00 00 00 00 -> LControl + LAlt + s
+ * -- --
+ * | |
+ * | `- Button 5 (0x16)
+ * `- 0x05 = 0101. Button 3 is pressed
+ * ^
+ *
+ * pad_buttons contains the list of button values that can be matched in
+ * HID_BPF_DEVICE_EVENT. Button 3 is only a placeholder in that list, as it
+ * has a dedicated bit instead.
+ *
+ *
+ * The Pen report descriptor announces a wrong tilt range:
+ *
+ * Report descriptor length: 109 bytes
+ * 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * 0x09, 0x02, // Usage (Pen) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x07, // Report ID (7) 6
+ * 0x09, 0x20, // Usage (Stylus) 8
+ * 0xa1, 0x01, // Collection (Application) 10
+ * 0x09, 0x42, // Usage (Tip Switch) 12
+ * 0x09, 0x44, // Usage (Barrel Switch) 14
+ * 0x09, 0x45, // Usage (Eraser) 16
+ * 0x09, 0x3c, // Usage (Invert) 18
+ * 0x15, 0x00, // Logical Minimum (0) 20
+ * 0x25, 0x01, // Logical Maximum (1) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x95, 0x04, // Report Count (4) 26
+ * 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * 0x95, 0x01, // Report Count (1) 30
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 32
+ * 0x09, 0x32, // Usage (In Range) 34
+ * 0x95, 0x01, // Report Count (1) 36
+ * 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * 0x95, 0x02, // Report Count (2) 40
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 42
+ * 0x75, 0x10, // Report Size (16) 44
+ * 0x95, 0x01, // Report Count (1) 46
+ * 0x35, 0x00, // Physical Minimum (0) 48
+ * 0xa4, // Push 50
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 51
+ * 0x09, 0x30, // Usage (X) 53
+ * 0x65, 0x13, // Unit (EnglishLinear: in) 55
+ * 0x55, 0x0d, // Unit Exponent (-3) 57
+ * 0x46, 0x10, 0x27, // Physical Maximum (10000) 59
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 62
+ * 0x81, 0x02, // Input (Data,Var,Abs) 65
+ * 0x09, 0x31, // Usage (Y) 67
+ * 0x46, 0x6a, 0x18, // Physical Maximum (6250) 69
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 72
+ * 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * 0xb4, // Pop 77
+ * 0x09, 0x30, // Usage (Tip Pressure) 78
+ * 0x45, 0x00, // Physical Maximum (0) 80
+ * 0x26, 0xff, 0x3f, // Logical Maximum (16383) 82
+ * 0x81, 0x42, // Input (Data,Var,Abs,Null) 85
+ * 0x09, 0x3d, // Usage (X Tilt) 87
+ * 0x15, 0x81, // Logical Minimum (-127) 89 <- Change from -127 to -60
+ * 0x25, 0x7f, // Logical Maximum (127) 91 <- Change from 127 to 60
+ * 0x75, 0x08, // Report Size (8) 93
+ * 0x95, 0x01, // Report Count (1) 95
+ * 0x81, 0x02, // Input (Data,Var,Abs) 97
+ * 0x09, 0x3e, // Usage (Y Tilt) 99
+ * 0x15, 0x81, // Logical Minimum (-127) 101 <- Change from -127 to -60
+ * 0x25, 0x7f, // Logical Maximum (127) 103 <- Change from 127 to 60
+ * 0x81, 0x02, // Input (Data,Var,Abs) 105
+ * 0xc0, // End Collection 107
+ * 0xc0, // End Collection 108
+ */
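
A worked example of the matching rule above, as a standalone sketch; the report bytes are the "Buttons 3 and 5" case from the tables, and BIT() is expanded to a plain shift:

#include <stdio.h>

/* Button values as they appear in the report, in pad-button order;
 * index 2 (button 3) is a placeholder because it has a dedicated bit.
 */
static const unsigned char pad_buttons[] = {
	0x05, 0x08, 0x00, 0x2c, 0x16, 0x1d, 0x57, 0x56
};

int main(void)
{
	/* "Buttons 3 and 5": 06 05 16 00 00 00 00 00 */
	const unsigned char report[8] = { 0x06, 0x05, 0x16, 0x00 };
	unsigned char mask = 0;
	size_t d, b;

	if (report[1] & (1 << 2))	/* button 3 has its own bit */
		mask |= 1 << 2;

	for (d = 2; d < sizeof(report); d++)
		for (b = 0; b < sizeof(pad_buttons); b++)
			if (report[d] && report[d] == pad_buttons[b])
				mask |= 1 << b;

	/* prints 0x14: bits 2 and 4, i.e. buttons 3 and 5 */
	printf("button mask: 0x%02x\n", mask);
	return 0;
}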
+
+#define PEN_REPORT_DESCRIPTOR_LENGTH 109
+#define PAD_REPORT_DESCRIPTOR_LENGTH 102
+#define PAD_REPORT_LENGTH 8
+#define PAD_REPORT_ID 6
+#define PAD_NUM_BUTTONS 8
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0 in report is the report ID
+ ReportId(PAD_REPORT_ID)
+ ReportCount(1)
+ ReportSize(8)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(PAD_NUM_BUTTONS)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(PAD_NUM_BUTTONS)
+ ReportSize(1)
+ Input(Var|Abs)
+ // Byte 2 in report - just exists so we get to be a tablet pad
+ UsagePage_Digitizers
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 3/4 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 5-7 are padding so we match the original report length
+ ReportCount(3)
+ ReportSize(8)
+ Input(Const)
+ )
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(xppen_deco01v3_rdesc_fixup, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+
+ const __u8 wrong_logical_range[] = {0x15, 0x81, 0x25, 0x7f};
+ const __u8 correct_logical_range[] = {0x15, 0xc4, 0x25, 0x3c};
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ switch (hctx->size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ if (__builtin_memcmp(&data[89], wrong_logical_range,
+ sizeof(wrong_logical_range)) == 0)
+ __builtin_memcpy(&data[89], correct_logical_range,
+ sizeof(correct_logical_range));
+ if (__builtin_memcmp(&data[101], wrong_logical_range,
+ sizeof(wrong_logical_range)) == 0)
+ __builtin_memcpy(&data[101], correct_logical_range,
+ sizeof(correct_logical_range));
+ break;
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(xppen_deco01v3_device_event, struct hid_bpf_ctx *hctx)
+{
+ static const __u8 pad_buttons[] = { 0x05, 0x08, 0x00, 0x2c, 0x16, 0x1d, 0x57, 0x56 };
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] == PAD_REPORT_ID) {
+ __u8 button_mask = 0;
+ size_t d, b;
+
+ /* data[1] stores the status of button 3 in the 3rd bit */
+ if (data[1] & BIT(2))
+ button_mask |= BIT(2);
+
+ /* The rest of the descriptor stores the buttons as in pad_buttons */
+ for (d = 2; d < 8; d++) {
+ for (b = 0; b < sizeof(pad_buttons); b++) {
+ if (data[d] != 0 && data[d] == pad_buttons[b])
+ button_mask |= BIT(b);
+ }
+ }
+
+ __u8 report[8] = {PAD_REPORT_ID, button_mask, 0x00};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ }
+ return 0;
+}
+
+HID_BPF_OPS(xppen_deco01v3) = {
+ .hid_rdesc_fixup = (void *)xppen_deco01v3_rdesc_fixup,
+ .hid_device_event = (void *)xppen_deco01v3_device_event,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c b/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c
new file mode 100644
index 000000000000..4b2549031e56
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD
+#define PID_DECO_02 0x0803
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_DECO_02),
+);
+
+/*
+ * The tablet exposes three devices:
+ * - Pad input, including pen (This is the only one we are interested in)
+ * - Pen input as mouse
+ * - Vendor
+ *
+ * Descriptors on main device are:
+ * - 7: Pen
+ * - 6: Vendor settings? Unclear
+ * - 3: Keyboard (This is what we want to modify)
+ * - 5: Feature report
+ *
+ * This creates three event nodes:
+ * - XP-PEN DECO 02 Stylus
+ * - XP-PEN DECO 02
+ * - XP-PEN DECO 02 Keyboard (Again, what we want to modify)
+ *
+ * # Report descriptor length: 188 bytes
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x07, // Report ID (7) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x00, // Collection (Physical) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x32, // Usage (In Range) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x05, // Report Count (5) 28
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x95, 0x03, // Report Count (3) 32
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 34
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 36
+ * # 0x09, 0x30, // Usage (X) 38
+ * # 0x15, 0x00, // Logical Minimum (0) 40
+ * # 0x26, 0x50, 0x57, // Logical Maximum (22352) 42
+ * # 0x55, 0x0d, // Unit Exponent (-3) 45
+ * # 0x65, 0x13, // Unit (EnglishLinear: in) 47
+ * # 0x35, 0x00, // Physical Minimum (0) 49
+ * # 0x46, 0x50, 0x57, // Physical Maximum (22352) 51
+ * # 0x75, 0x10, // Report Size (16) 54
+ * # 0x95, 0x01, // Report Count (1) 56
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * # 0x09, 0x31, // Usage (Y) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 62
+ * # 0x26, 0x92, 0x36, // Logical Maximum (13970) 64
+ * # 0x55, 0x0d, // Unit Exponent (-3) 67
+ * # 0x65, 0x13, // Unit (EnglishLinear: in) 69
+ * # 0x35, 0x00, // Physical Minimum (0) 71
+ * # 0x46, 0x92, 0x36, // Physical Maximum (13970) 73
+ * # 0x75, 0x10, // Report Size (16) 76
+ * # 0x95, 0x01, // Report Count (1) 78
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 80
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 82
+ * # 0x09, 0x30, // Usage (Tip Pressure) 84
+ * # 0x15, 0x00, // Logical Minimum (0) 86
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 88
+ * # 0x75, 0x10, // Report Size (16) 91
+ * # 0x95, 0x01, // Report Count (1) 93
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 95
+ * # 0xc0, // End Collection 97
+ * # 0xc0, // End Collection 98
+ * # 0x09, 0x0e, // Usage (Device Configuration) 99
+ * # 0xa1, 0x01, // Collection (Application) 101
+ * # 0x85, 0x05, // Report ID (5) 103
+ * # 0x09, 0x23, // Usage (Device Settings) 105
+ * # 0xa1, 0x02, // Collection (Logical) 107
+ * # 0x09, 0x52, // Usage (Inputmode) 109
+ * # 0x09, 0x53, // Usage (Device Index) 111
+ * # 0x25, 0x0a, // Logical Maximum (10) 113
+ * # 0x75, 0x08, // Report Size (8) 115
+ * # 0x95, 0x02, // Report Count (2) 117
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 119
+ * # 0xc0, // End Collection 121
+ * # 0xc0, // End Collection 122
+ * # 0x05, 0x0c, // Usage Page (Consumer Devices) 123
+ * # 0x09, 0x36, // Usage (Function Buttons) 125
+ * # 0xa1, 0x00, // Collection (Physical) 127
+ * # 0x85, 0x06, // Report ID (6) 129
+ * # 0x05, 0x09, // Usage Page (Button) 131
+ * # 0x19, 0x01, // Usage Minimum (1) 133
+ * # 0x29, 0x20, // Usage Maximum (32) 135
+ * # 0x15, 0x00, // Logical Minimum (0) 137
+ * # 0x25, 0x01, // Logical Maximum (1) 139
+ * # 0x95, 0x20, // Report Count (32) 141
+ * # 0x75, 0x01, // Report Size (1) 143
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 145
+ * # 0xc0, // End Collection 147
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 148
+ * # 0x09, 0x06, // Usage (Keyboard) 150
+ * # 0xa1, 0x01, // Collection (Application) 152
+ * # 0x85, 0x03, // Report ID (3) 154
+ * # 0x05, 0x07, // Usage Page (Keyboard) 156
+ * # 0x19, 0xe0, // Usage Minimum (224) 158
+ * # 0x29, 0xe7, // Usage Maximum (231) 160
+ * # 0x15, 0x00, // Logical Minimum (0) 162
+ * # 0x25, 0x01, // Logical Maximum (1) 164
+ * # 0x75, 0x01, // Report Size (1) 166
+ * # 0x95, 0x08, // Report Count (8) 168
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 170
+ * # 0x05, 0x07, // Usage Page (Keyboard) 172
+ * # 0x19, 0x00, // Usage Minimum (0) 174
+ * # 0x29, 0xff, // Usage Maximum (255) 176
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 178
+ * # 0x75, 0x08, // Report Size (8) 181
+ * # 0x95, 0x06, // Report Count (6) 183
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 185
+ * # 0xc0, // End Collection 187
+ *
+ * Key events; top to bottom:
+ * Buttons released: 03 00 00 00 00 00 00 00
+ * Button1: 03 00 05 00 00 00 00 00 -> 'b and B'
+ * Button2: 03 00 2c 00 00 00 00 00 -> 'Spacebar'
+ * Button3: 03 00 08 00 00 00 00 00 -> 'e and E'
+ * Button4: 03 00 0c 00 00 00 00 00 -> 'i and I'
+ * Button5: 03 05 1d 00 00 00 00 00 -> LeftControl + LeftAlt + 'z and Z'
+ * Button6: 03 01 16 00 00 00 00 00 -> LeftControl + 's and S'
+ *
+ * Dial Events:
+ * Clockwise: 03 01 2e 00 00 00 00 00 -> LeftControl + '= and +'
+ * Anticlockwise: 03 01 2d 00 00 00 00 00 -> LeftControl + '- and (underscore)'
+ *
+ * NOTE: Input event descriptions begin at byte 2, and progressively build
+ * towards byte 7 as each new key is pressed, maintaining the press order.
+ * For example:
+ * BTN1 followed by BTN2 is 03 00 05 2c 00 00 00 00
+ * BTN2 followed by BTN1 is 03 00 2c 05 00 00 00 00
+ *
+ * Releasing a button causes its byte to be freed, and the next item in the list
+ * is pushed forwards. Dial events are released immediately after an event is
+ * registered (i.e. after each "click"), so they will continually appear pushed
+ * backwards in the report.
+ *
+ * When a button with a modifier key is pressed, the button identifier stacks in
+ * an abnormal way, where the highest modifier byte always supersedes others.
+ * In these cases, the button with the higher modifier is always last.
+ * For example:
+ * BTN6 followed by BTN5 is 03 05 1d 16 00 00 00 00
+ * BTN5 followed by BTN6 is 03 05 1d 16 00 00 00 00
+ * BTN5 followed by BTN1 is 03 05 05 1d 00 00 00 00
+ *
+ * For three button presses in order, demonstrating the rules above:
+ * BTN6, BTN1, BTN5 is 03 05 05 1d 16 00 00 00
+ * BTN5, BTN1, BTN6 is 03 05 05 1d 16 00 00 00
+ *
+ * In short, when BTN5/6 are pressed, the order of operations is lost, as they
+ * will always float to the end when pressed in combination with others.
+ *
+ * Fortunately, all states are recorded in the same way, with no overlaps.
+ * Byte 1 only ever carries modifier keys, so it can be repurposed for the dial.
+ */
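
A standalone sketch of the same scan applied to a hypothetical report in which the dial clicks clockwise while button 1 is held (key codes taken from the tables above):

#include <stdio.h>

int main(void)
{
	/* Hypothetical report: button 1 (0x05) held while the dial clicks
	 * clockwise (0x2e): 03 01 05 2e 00 00 00 00
	 */
	const unsigned char report[8] = { 0x03, 0x01, 0x05, 0x2e };
	signed char dial = 0;
	unsigned char mask = 0;
	size_t d;

	for (d = 2; d < sizeof(report); d++) {
		switch (report[d]) {
		case 0x2e: dial = 1; break;		/* dial clockwise */
		case 0x2d: dial = -1; break;		/* dial anticlockwise */
		case 0x05: mask |= 1 << 0; break;	/* button 1 */
		case 0x2c: mask |= 1 << 1; break;	/* button 2 */
		case 0x08: mask |= 1 << 2; break;	/* button 3 */
		case 0x0c: mask |= 1 << 3; break;	/* button 4 */
		case 0x1d: mask |= 1 << 4; break;	/* button 5 */
		case 0x16: mask |= 1 << 5; break;	/* button 6 */
		}
	}

	/* prints "dial=1 mask=0x01" */
	printf("dial=%d mask=0x%02x\n", dial, mask);
	return 0;
}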
+
+#define RDESC_SIZE_PAD 188
+#define REPORT_SIZE_PAD 8
+#define REPORT_ID_BUTTONS 3
+#define PAD_BUTTON_COUNT 6
+#define RDESC_KEYBOARD_OFFSET 148
+
+static const __u8 fixed_rdesc_pad[] = {
+ /* Copy of pen descriptor to avoid losing functionality */
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(7)
+ Usage_Dig_Stylus
+ CollectionPhysical(
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_Eraser
+ Usage_Dig_Invert
+ Usage_Dig_InRange
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ ReportCount(5)
+ Input(Var|Abs)
+ ReportCount(3)
+ Input(Const) /* Input (Const, Var, Abs) */
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(22352)
+ UnitExponent(-3)
+ Unit(in) /* (EnglishLinear: in) */
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(22352)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ Usage_GD_Y
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(13970)
+ UnitExponent(-3)
+ Unit(in) /* (EnglishLinear: in) */
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(13970)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ )
+ )
+
+ /* FIXES BEGIN */
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(REPORT_ID_BUTTONS) /* Retain original ID on byte 0 */
+ ReportCount(1)
+ ReportSize(REPORT_SIZE_PAD)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /* Byte 1: Dial state */
+ UsagePage_GenericDesktop
+ Usage_GD_Dial
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(REPORT_SIZE_PAD)
+ Input(Var|Rel)
+ /* Byte 2: Button state */
+ UsagePage_Button
+ ReportSize(1)
+ ReportCount(PAD_BUTTON_COUNT)
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(PAD_BUTTON_COUNT) /* Number of buttons */
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ Input(Var|Abs)
+ /* Byte 3: Exists to be a tablet pad */
+ UsagePage_Digitizers
+ Usage_Dig_BarrelSwitch
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) /* Padding, to fill full report space */
+ Input(Const)
+ /* Byte 4/5: Exists to be a tablet pad */
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ /* Bytes 6/7: Padding, to match original length */
+ ReportCount(2)
+ ReportSize(8)
+ Input(Const)
+ )
+ FixedSizeVendorReport(RDESC_SIZE_PAD)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(xppen_deco02_rdesc_fixup, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
+
+ if (!data)
+ return 0; /* EPERM Check */
+
+ if (hctx->size == RDESC_SIZE_PAD) {
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(xppen_deco02_device_event, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, REPORT_SIZE_PAD);
+
+ if (!data || data[0] != REPORT_ID_BUTTONS)
+ return 0; /* EPERM or wrong report */
+
+ __u8 dial_code = 0;
+ __u8 button_mask = 0;
+ size_t d;
+
+ /* Start from 2; 0 is report ID, 1 is modifier keys, replaced by dial */
+ for (d = 2; d < 8; d++) {
+ switch (data[d]) {
+ case 0x2e:
+ dial_code = 1;
+ break;
+ case 0x2d:
+ dial_code = -1;
+ break;
+ /* below are buttons, top to bottom */
+ case 0x05:
+ button_mask |= BIT(0);
+ break;
+ case 0x2c:
+ button_mask |= BIT(1);
+ break;
+ case 0x08:
+ button_mask |= BIT(2);
+ break;
+ case 0x0c:
+ button_mask |= BIT(3);
+ break;
+ case 0x1d:
+ button_mask |= BIT(4);
+ break;
+ case 0x16:
+ button_mask |= BIT(5);
+ break;
+ default:
+ break;
+ }
+ }
+
+ __u8 report[8] = { REPORT_ID_BUTTONS, dial_code, button_mask, 0x00 };
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return 0;
+}
+
+HID_BPF_OPS(xppen_deco02) = {
+ .hid_rdesc_fixup = (void *)xppen_deco02_rdesc_fixup,
+ .hid_device_event = (void *)xppen_deco02_device_event,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != RDESC_SIZE_PAD ? -EINVAL : 0;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/hid_report_helpers.h b/drivers/hid/bpf/progs/hid_report_helpers.h
index 9b2a48e4a311..9944ff54d31d 100644
--- a/drivers/hid/bpf/progs/hid_report_helpers.h
+++ b/drivers/hid/bpf/progs/hid_report_helpers.h
@@ -143,8 +143,11 @@
* report with Report ID 0xac of the given size in bytes.
* The size is inclusive of the 1 byte Report ID prefix.
*
- * HID-BPF requires that at least one report has
- * the same size as the original report from the device.
+ * The kernel discards any HID reports that are larger
+ * than the largest report in a HID report descriptor.
+ * Thus at least one report must be at least as
+ * large as the largest original report from
+ * the device.
* The easy way to ensure that is to add this
* macro as the last element of your CollectionApplication;
* other reports can be of any size less than this.
@@ -295,6 +298,7 @@
#define Usage_GD_SystemSpeakerMute Usage_i8(0xa7)
#define Usage_GD_SystemHibernate Usage_i8(0xa8)
#define Usage_GD_SystemMicrophoneMute Usage_i8(0xa9)
+#define Usage_GD_SystemAccessibilityBinding Usage_i8(0xaa)
#define Usage_GD_SystemDisplayInvert Usage_i8(0xb0)
#define Usage_GD_SystemDisplayInternal Usage_i8(0xb1)
#define Usage_GD_SystemDisplayExternal Usage_i8(0xb2)
@@ -2669,7 +2673,7 @@
#define Usage_BS_iDeviceName Usage_i8(0x88)
#define Usage_BS_iDeviceChemistry Usage_i8(0x89)
#define Usage_BS_ManufacturerData Usage_i8(0x8a)
-#define Usage_BS_Rechargable Usage_i8(0x8b)
+#define Usage_BS_Rechargeable Usage_i8(0x8b)
#define Usage_BS_WarningCapacityLimit Usage_i8(0x8c)
#define Usage_BS_CapacityGranularity1 Usage_i8(0x8d)
#define Usage_BS_CapacityGranularity2 Usage_i8(0x8e)
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
index 772f44d56ccf..3aff4e092132 100644
--- a/drivers/hwmon/cgbc-hwmon.c
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
nb_sensors = data[0];
hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ if (!hwmon->sensors)
+ return -ENOMEM;
+
sensor = hwmon->sensors;
for (i = 0; i < nb_sensors; i++) {
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
index 644dc3ca9df7..f81c3bc422f4 100644
--- a/drivers/hwmon/gpd-fan.c
+++ b/drivers/hwmon/gpd-fan.c
@@ -12,9 +12,9 @@
* Copyright (c) 2024 Cryolitia PukNgae
*/
-#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/hwmon.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -276,31 +276,6 @@ static int gpd_generic_read_rpm(void)
return (u16)high << 8 | low;
}
-static void gpd_win4_init_ec(void)
-{
- u8 chip_id, chip_ver;
-
- gpd_ecram_read(0x2000, &chip_id);
-
- if (chip_id == 0x55) {
- gpd_ecram_read(0x1060, &chip_ver);
- gpd_ecram_write(0x1060, chip_ver | 0x80);
- }
-}
-
-static int gpd_win4_read_rpm(void)
-{
- int ret;
-
- ret = gpd_generic_read_rpm();
-
- if (ret == 0)
- // Re-init EC when speed is 0
- gpd_win4_init_ec();
-
- return ret;
-}
-
static int gpd_wm2_read_rpm(void)
{
for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
@@ -320,11 +295,10 @@ static int gpd_wm2_read_rpm(void)
static int gpd_read_rpm(void)
{
switch (gpd_driver_priv.drvdata->board) {
+ case win4_6800u:
case win_mini:
case duo:
return gpd_generic_read_rpm();
- case win4_6800u:
- return gpd_win4_read_rpm();
case win_max_2:
return gpd_wm2_read_rpm();
}
@@ -607,6 +581,28 @@ static struct hwmon_chip_info gpd_fan_chip_info = {
.info = gpd_fan_hwmon_channel_info
};
+static void gpd_win4_init_ec(void)
+{
+ u8 chip_id, chip_ver;
+
+ gpd_ecram_read(0x2000, &chip_id);
+
+ if (chip_id == 0x55) {
+ gpd_ecram_read(0x1060, &chip_ver);
+ gpd_ecram_write(0x1060, chip_ver | 0x80);
+ }
+}
+
+static void gpd_init_ec(void)
+{
+ // The buggy firmware won't initialize EC properly on boot.
+ // Before its initialization, reading RPM will always return 0,
+ // and writing PWM will have no effect.
+ // Initialize it manually on driver load.
+ if (gpd_driver_priv.drvdata->board == win4_6800u)
+ gpd_win4_init_ec();
+}
+
static int gpd_fan_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -615,14 +611,14 @@ static int gpd_fan_probe(struct platform_device *pdev)
const struct device *hwdev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return dev_err_probe(dev, PTR_ERR(res),
+ if (!res)
+ return dev_err_probe(dev, -EINVAL,
"Failed to get platform resource\n");
region = devm_request_region(dev, res->start,
resource_size(res), DRIVER_NAME);
- if (IS_ERR(region))
- return dev_err_probe(dev, PTR_ERR(region),
+ if (!region)
+ return dev_err_probe(dev, -EBUSY,
"Failed to request region\n");
hwdev = devm_hwmon_device_register_with_info(dev,
@@ -631,9 +627,11 @@ static int gpd_fan_probe(struct platform_device *pdev)
&gpd_fan_chip_info,
NULL);
if (IS_ERR(hwdev))
- return dev_err_probe(dev, PTR_ERR(region),
+ return dev_err_probe(dev, PTR_ERR(hwdev),
"Failed to register hwmon device\n");
+ gpd_init_ec();
+
return 0;
}
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 52cf62e45a86..6bba9b50c51b 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -336,10 +336,9 @@ static int isl68137_probe_from_dt(struct device *dev,
struct isl68137_data *data)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 56834d26f8ef..ef981ed97da8 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -336,18 +336,18 @@ static struct pmbus_driver_info max34440_info[] = {
.format[PSC_CURRENT_IN] = direct,
.format[PSC_CURRENT_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
- .m[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_IN] = 125,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 0,
- .m[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_VOLTAGE_OUT] = 125,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 0,
- .m[PSC_CURRENT_IN] = 1,
+ .m[PSC_CURRENT_IN] = 250,
.b[PSC_CURRENT_IN] = 0,
- .R[PSC_CURRENT_IN] = 2,
- .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_IN] = -1,
+ .m[PSC_CURRENT_OUT] = 250,
.b[PSC_CURRENT_OUT] = 0,
- .R[PSC_CURRENT_OUT] = 2,
+ .R[PSC_CURRENT_OUT] = -1,
.m[PSC_TEMPERATURE] = 1,
.b[PSC_TEMPERATURE] = 0,
.R[PSC_TEMPERATURE] = 2,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 557ad3e7752a..f36c0229328f 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -291,24 +291,26 @@ out:
return data;
}
-static int temp1_input_read(struct device *dev)
+static int temp1_input_read(struct device *dev, long *temp)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->temperature;
+ *temp = data->temperature;
+ return 0;
}
-static int humidity1_input_read(struct device *dev)
+static int humidity1_input_read(struct device *dev, long *humidity)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->humidity;
+ *humidity = data->humidity;
+ return 0;
}
/*
@@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
enum sht3x_limits index;
+ int ret;
switch (type) {
case hwmon_chip:
@@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- *val = temp1_input_read(dev);
- break;
+ return temp1_input_read(dev, val);
case hwmon_temp_alarm:
- *val = temp1_alarm_read(dev);
+ ret = temp1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_temp_max:
index = limit_max;
@@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- *val = humidity1_input_read(dev);
- break;
+ return humidity1_input_read(dev, val);
case hwmon_humidity_alarm:
- *val = humidity1_alarm_read(dev);
+ ret = humidity1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_humidity_max:
index = limit_max;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 75c8d08fa24e..b9f370c9f018 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -118,6 +118,7 @@ struct pca954x {
raw_spinlock_t lock;
struct regulator *supply;
+ struct gpio_desc *reset_gpio;
struct reset_control *reset_cont;
};
@@ -315,25 +316,6 @@ static u8 pca954x_regval(struct pca954x *data, u8 chan)
return 1 << chan;
}
-static void pca954x_reset_assert(struct pca954x *data)
-{
- if (data->reset_cont)
- reset_control_assert(data->reset_cont);
-}
-
-static void pca954x_reset_deassert(struct pca954x *data)
-{
- if (data->reset_cont)
- reset_control_deassert(data->reset_cont);
-}
-
-static void pca954x_reset_mux(struct pca954x *data)
-{
- pca954x_reset_assert(data);
- udelay(1);
- pca954x_reset_deassert(data);
-}
-
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
@@ -347,8 +329,6 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = ret < 0 ? 0 : regval;
}
- if (ret == -ETIMEDOUT && data->reset_cont)
- pca954x_reset_mux(data);
return ret;
}
@@ -358,7 +338,6 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
s32 idle_state;
- int ret = 0;
idle_state = READ_ONCE(data->idle_state);
if (idle_state >= 0)
@@ -368,10 +347,8 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
if (idle_state == MUX_IDLE_DISCONNECT) {
/* Deselect active channel */
data->last_chan = 0;
- ret = pca954x_reg_write(muxc->parent, client,
- data->last_chan);
- if (ret == -ETIMEDOUT && data->reset_cont)
- pca954x_reset_mux(data);
+ return pca954x_reg_write(muxc->parent, client,
+ data->last_chan);
}
/* otherwise leave as-is */
@@ -550,10 +527,29 @@ static int pca954x_get_reset(struct device *dev, struct pca954x *data)
if (IS_ERR(data->reset_cont))
return dev_err_probe(dev, PTR_ERR(data->reset_cont),
"Failed to get reset\n");
+ else if (data->reset_cont)
+ return 0;
+
+ /*
+ * Fall back to legacy reset-gpios
+ */
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+ "Failed to get reset gpio");
+ }
return 0;
}
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+ else
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+}
+
/*
* I2C init/probing/exit functions
*/
@@ -593,7 +589,7 @@ static int pca954x_probe(struct i2c_client *client)
if (ret)
goto fail_cleanup;
- if (data->reset_cont) {
+ if (data->reset_cont || data->reset_gpio) {
udelay(1);
pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 37cd37556510..fab5d914029d 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return ret;
err_free:
+ ib_umem_release(umem);
rdma_restrack_put(&cq->res);
kfree(cq);
err_event_file:
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 4dab5ca7362b..84ce3fce2826 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -913,7 +913,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
-static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
@@ -933,10 +933,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
- if (rc) {
+ if (rc)
ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
- goto fail;
- }
+
bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
/* remove from active qp list */
@@ -951,10 +950,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
rdev->gsi_ctx.gsi_sqp = NULL;
rdev->gsi_ctx.gsi_sah = NULL;
rdev->gsi_ctx.sqp_tbl = NULL;
-
- return 0;
-fail:
- return rc;
}
static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index d9a12681f843..22d3e25c3b9d 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (umem->length < cq->size) {
ibdev_dbg(&dev->ibdev, "External memory too small\n");
err = -EINVAL;
- goto err_free_mem;
+ goto err_out;
}
if (!ib_umem_is_contiguous(umem)) {
ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
err = -EINVAL;
- goto err_free_mem;
+ goto err_out;
}
cq->cpu_addr = NULL;
@@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
- goto err_free_mem;
+ goto err_free_mapped;
resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@@ -1299,12 +1299,10 @@ err_remove_mmap:
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mem:
- if (umem)
- ib_umem_release(umem);
- else
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
-
+err_free_mapped:
+ if (!umem)
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 3a5c93c9fb3e..6aa82fe9dd3d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -37,6 +38,43 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ return;
+
+ mutex_lock(&cq_table->bank_mutex);
+ cq_table->ctx_num[uctx->cq_bank_id]--;
+ mutex_unlock(&cq_table->bank_mutex);
+}
+
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ u32 least_load = cq_table->ctx_num[0];
+ u8 bankid = 0;
+ u8 i;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ return;
+
+ mutex_lock(&cq_table->bank_mutex);
+ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ if (cq_table->ctx_num[i] < least_load) {
+ least_load = cq_table->ctx_num[i];
+ bankid = i;
+ }
+ }
+ cq_table->ctx_num[bankid]++;
+ mutex_unlock(&cq_table->bank_mutex);
+
+ uctx->cq_bank_id = bankid;
+}
+
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
u32 least_load = bank[0].inuse;
@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
return bankid;
}
-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_bank *bank, struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = udata ?
+ rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+ ibucontext) : NULL;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return uctx ? uctx->cq_bank_id : 0;
+
+ return get_least_load_bankid_for_cq(bank);
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct hns_roce_bank *bank;
@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
int id;
mutex_lock(&cq_table->bank_mutex);
- bankid = get_least_load_bankid_for_cq(cq_table->bank);
+ bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
bank = &cq_table->bank[bankid];
id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}
- ret = alloc_cqn(hr_dev, hr_cq);
+ ret = alloc_cqn(hr_dev, hr_cq, udata);
if (ret) {
ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
goto err_cq_db;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 78ee04a48a74..06832c0ac055 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -217,6 +217,7 @@ struct hns_roce_ucontext {
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
u32 config;
+ u8 cq_bank_id;
};
struct hns_roce_pd {
@@ -495,6 +496,7 @@ struct hns_roce_cq_table {
struct hns_roce_hem_table table;
struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
struct mutex bank_mutex;
+ u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
};
struct hns_roce_srq_table {
@@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index f82bdd46a917..63052c0e7613 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
hr_reg_clear(fseg, FRMR_BLK_MODE);
+ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
+ hr_reg_clear(fseg, FRMR_ZBVA);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int j = 0;
int i;
- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
-
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
!!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+ curr_idx & (qp->sge.sge_cnt - 1));
+
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
if (msg_len != ATOMIC_WR_LEN)
@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+ /* RC and UD share the same DirectWQE field layout */
+ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
+
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@@ -7048,7 +7053,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}
-
handle->priv = hr_dev;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d50f36f8a110..f3607fe107a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_copy_to_udata;
+ hns_roce_get_cq_bankid_for_uctx(context);
+
return 0;
error_fail_copy_to_udata:
@@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ hns_roce_put_cq_bankid_for_uctx(context);
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6ff1b8ce580c..bdd879ac12dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
hr_qp->sq.wqe_cnt = cnt;
- cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}
@@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* sync the parameters of kernel QP to user's configuration */
cap->max_send_wr = cnt;
- cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index 3091f9345f12..fa6325adaede 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 4ae77cdde9dc..c1b8f81ea283 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -706,7 +706,7 @@ struct irdma_sc_dev {
u32 vchnl_ver;
u16 num_vfs;
u16 hmc_fn_id;
- u8 vf_id;
+ u16 vf_id;
bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 76ce6137f2ba..c883c9ea5a83 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2503,6 +2503,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
+ iwcq->cq_num = cq_num;
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index ed21c1b56e8e..ac8b38701835 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -140,7 +140,7 @@ struct irdma_srq {
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
- u16 cq_num;
+ u32 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a23b364e24ff..651d76bca114 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1020,15 +1020,18 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
+ if (udata) {
+ cq->mcq.comp = mlx5_add_cq_to_tasklet;
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
+ } else {
+ cq->mcq.comp = mlx5_ib_cq_comp;
+ }
+
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
- if (udata)
- cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
- else
- cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
INIT_LIST_HEAD(&cq->wc_list);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index c0360c450880..75d60f2ad900 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -707,7 +707,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
struct iopt_area *area;
unsigned long unmapped_bytes = 0;
unsigned int tries = 0;
- int rc = -ENOENT;
+ /* If there are no mapped entries then success */
+ int rc = 0;
/*
* The domains_rwsem must be held in read mode any time any area->pages
@@ -777,8 +778,6 @@ again:
down_write(&iopt->iova_rwsem);
}
- if (unmapped_bytes)
- rc = 0;
out_unlock_iova:
up_write(&iopt->iova_rwsem);
@@ -815,13 +814,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
{
- int rc;
-
- rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
/* If the IOVAs are empty then unmap all succeeds */
- if (rc == -ENOENT)
- return 0;
- return rc;
+ return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
}
/* The caller must always free all the nodes in the allowed_iova rb_root. */
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 1542c5fd10a8..459a7c516915 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -367,6 +367,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
&unmapped);
if (rc)
goto out_put;
+ if (!unmapped) {
+ rc = -ENOENT;
+ goto out_put;
+ }
}
cmd->length = unmapped;
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 4514575818fc..b5b67a9d3fb3 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -130,9 +130,8 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
- unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
-
- return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+ return (iova >> bitmap->mapped.pgshift) /
+ BITS_PER_TYPE(*bitmap->bitmap);
}
/*
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e5805885394e..70290b35b317 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -166,7 +166,8 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
static const struct irq_domain_ops riscv_intc_domain_ops = {
.map = riscv_intc_domain_map,
.xlate = irq_domain_xlate_onecell,
- .alloc = riscv_intc_domain_alloc
+ .alloc = riscv_intc_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct fwnode_handle *riscv_intc_hwnode(void)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index e54419a4e731..541a20cb58f1 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1904,13 +1904,13 @@ out:
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
- kfree(hw);
return err;
}
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
+ int err;
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
@@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
- kfree(hw);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_free_hw;
}
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);
- if (setup_instance(hw, dev->dev.parent))
- return -EIO;
+ if (setup_instance(hw, dev->dev.parent)) {
+ err = -EIO;
+ goto err_free_urb;
+ }
hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;
+
+err_free_urb:
+ usb_free_urb(hw->ctrl_urb);
+err_free_hw:
+ kfree(hw);
+ return err;
}
/* function called when an active device is removed */
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index d911021c1bb0..83862d57b126 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -1010,6 +1010,11 @@ int vb2_ioctl_remove_bufs(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev->queue, file))
return -EBUSY;
+ if (vb2_fileio_is_active(vdev->queue)) {
+ dprintk(vdev->queue, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index b62fd12c93c1..74c59a94b2b0 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -1136,11 +1136,8 @@ int cx18_init_on_first_open(struct cx18 *cx)
int video_input;
int fw_retry_count = 3;
struct v4l2_frequency vf;
- struct cx18_open_id fh;
v4l2_std_id std;
- fh.cx = cx;
-
if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
return -ENXIO;
@@ -1220,14 +1217,14 @@ int cx18_init_on_first_open(struct cx18 *cx)
video_input = cx->active_input;
cx->active_input++; /* Force update of input */
- cx18_s_input(NULL, &fh, video_input);
+ cx18_do_s_input(cx, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
cx->std++; /* Force full standard initialization */
std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
- cx18_s_std(NULL, &fh, std);
- cx18_s_frequency(NULL, &fh, &vf);
+ cx18_do_s_std(cx, std);
+ cx18_do_s_frequency(cx, &vf);
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 0f3019739d03..0d676a57e24e 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -521,10 +521,8 @@ static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
return 0;
}
-int cx18_s_input(struct file *file, void *fh, unsigned int inp)
+int cx18_do_s_input(struct cx18 *cx, unsigned int inp)
{
- struct cx18_open_id *id = file2id(file);
- struct cx18 *cx = id->cx;
v4l2_std_id std = V4L2_STD_ALL;
const struct cx18_card_video_input *card_input =
cx->card->video_inputs + inp;
@@ -558,6 +556,11 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
return 0;
}
+static int cx18_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return cx18_do_s_input(file2id(file)->cx, inp);
+}
+
static int cx18_g_frequency(struct file *file, void *fh,
struct v4l2_frequency *vf)
{
@@ -570,11 +573,8 @@ static int cx18_g_frequency(struct file *file, void *fh,
return 0;
}
-int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf)
{
- struct cx18_open_id *id = file2id(file);
- struct cx18 *cx = id->cx;
-
if (vf->tuner != 0)
return -EINVAL;
@@ -585,6 +585,12 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
return 0;
}
+static int cx18_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
+{
+ return cx18_do_s_frequency(file2id(file)->cx, vf);
+}
+
static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct cx18 *cx = file2id(file)->cx;
@@ -593,11 +599,8 @@ static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
return 0;
}
-int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
+int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std)
{
- struct cx18_open_id *id = file2id(file);
- struct cx18 *cx = id->cx;
-
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -642,6 +645,11 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
return 0;
}
+static int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ return cx18_do_s_std(file2id(file)->cx, std);
+}
+
static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
struct cx18_open_id *id = file2id(file);
diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h
index 97cd9f99e22d..42a8acd69735 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.h
+++ b/drivers/media/pci/cx18/cx18-ioctl.h
@@ -12,6 +12,8 @@ u16 cx18_service2vbi(int type);
void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt);
void cx18_set_funcs(struct video_device *vdev);
-int cx18_s_std(struct file *file, void *fh, v4l2_std_id std);
-int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int cx18_s_input(struct file *file, void *fh, unsigned int inp);
+
+struct cx18;
+int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std);
+int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf);
+int cx18_do_s_input(struct cx18 *cx, unsigned int inp);
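
The cx18 (and matching ivtv) rework above splits each ioctl handler into a core cx18_do_*() helper that takes the driver state directly plus a thin file-handle wrapper, so internal init code no longer has to fake an open file handle. A small standalone sketch of that wrapper/core split; struct dev_state, open_handle and the input-count check are illustrative, not the driver's real types:

#include <stdio.h>
#include <errno.h>

struct dev_state {
	unsigned int input;
	unsigned int num_inputs;
};

struct open_handle {
	struct dev_state *state;
};

/* Core helper: callable from both the ioctl path and internal init code. */
static int do_s_input(struct dev_state *s, unsigned int inp)
{
	if (inp >= s->num_inputs)
		return -EINVAL;
	s->input = inp;
	return 0;
}

/* ioctl-style wrapper: its only job is to dig the state out of the handle. */
static int s_input(struct open_handle *h, unsigned int inp)
{
	return do_s_input(h->state, inp);
}

int main(void)
{
	struct dev_state s = { .input = 0, .num_inputs = 4 };
	struct open_handle h = { .state = &s };

	printf("internal: %d, ioctl: %d\n",
	       do_s_input(&s, 2), s_input(&h, 3));
	return 0;
}
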
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 72a8f76a41f4..459eb6cc370c 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1247,15 +1247,12 @@ err:
int ivtv_init_on_first_open(struct ivtv *itv)
{
- struct v4l2_frequency vf;
/* Needed to call ioctls later */
- struct ivtv_open_id fh;
+ struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
+ struct v4l2_frequency vf;
int fw_retry_count = 3;
int video_input;
- fh.itv = itv;
- fh.type = IVTV_ENC_STREAM_TYPE_MPG;
-
if (test_bit(IVTV_F_I_FAILED, &itv->i_flags))
return -ENXIO;
@@ -1297,13 +1294,13 @@ int ivtv_init_on_first_open(struct ivtv *itv)
video_input = itv->active_input;
itv->active_input++; /* Force update of input */
- ivtv_s_input(NULL, &fh, video_input);
+ ivtv_do_s_input(itv, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
itv->std++; /* Force full standard initialization */
itv->std_out = itv->std;
- ivtv_s_frequency(NULL, &fh, &vf);
+ ivtv_do_s_frequency(s, &vf);
if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
/* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 84c73bd22f2d..8d5ea3aec06f 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -974,9 +974,8 @@ static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
return 0;
}
-int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
+int ivtv_do_s_input(struct ivtv *itv, unsigned int inp)
{
- struct ivtv *itv = file2id(file)->itv;
v4l2_std_id std;
int i;
@@ -1017,6 +1016,11 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
return 0;
}
+static int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return ivtv_do_s_input(file2id(file)->itv, inp);
+}
+
static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
struct ivtv *itv = file2id(file)->itv;
@@ -1065,10 +1069,9 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
return 0;
}
-int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf)
{
- struct ivtv *itv = file2id(file)->itv;
- struct ivtv_stream *s = &itv->streams[file2id(file)->type];
+ struct ivtv *itv = s->itv;
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1082,6 +1085,15 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
return 0;
}
+static int ivtv_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
+{
+ struct ivtv_open_id *id = file2id(file);
+ struct ivtv *itv = id->itv;
+
+ return ivtv_do_s_frequency(&itv->streams[id->type], vf);
+}
+
static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = file2id(file)->itv;
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.h b/drivers/media/pci/ivtv/ivtv-ioctl.h
index 7f8c6f43d397..96ca7e2ef973 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.h
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.h
@@ -9,6 +9,8 @@
#ifndef IVTV_IOCTL_H
#define IVTV_IOCTL_H
+struct ivtv;
+
u16 ivtv_service2vbi(int type);
void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
@@ -17,7 +19,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed);
void ivtv_set_funcs(struct video_device *vdev);
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std);
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std);
-int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
+int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf);
+int ivtv_do_s_input(struct ivtv *itv, unsigned int inp);
#endif
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index fb6afb8e84f0..ee4f54d68349 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -167,13 +167,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id)
{
- struct uvc_streaming *stream;
+ struct uvc_streaming *stream, *last_stream;
+ unsigned int count = 0;
list_for_each_entry(stream, &dev->streams, list) {
+ count += 1;
+ last_stream = stream;
if (stream->header.bTerminalLink == id)
return stream;
}
+ /*
+ * If the streaming entity is referenced by an invalid ID, notify the
+ * user and use heuristics to guess the correct entity.
+ */
+ if (count == 1 && id == UVC_INVALID_ENTITY_ID) {
+ dev_warn(&dev->intf->dev,
+ "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one.");
+ return last_stream;
+ }
+
return NULL;
}
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 1da953629010..25e66bf18f5f 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -2608,7 +2608,7 @@ EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
- sd->privacy_led = led_get(sd->dev, "privacy-led");
+ sd->privacy_led = led_get(sd->dev, "privacy");
if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
"getting privacy LED\n");
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index 4aae0733d0fc..ab594908cb4a 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -2,9 +2,11 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
+ This driver is intended to run on the BMC, not the managed node.
This driver can also be built as a module. If so, the module will
be called sbrmi-i2c.
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 621bce7e101c..ee652ef01534 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -381,6 +381,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
}
spin_unlock(&fl->lock);
+ dma_buf_put(buf);
+
return ret;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bc40b940ae21..a4f75dc36929 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -120,6 +120,8 @@
#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c
index 77686b108d3c..78717ee8ac9a 100644
--- a/drivers/misc/mei/mei_lb.c
+++ b/drivers/misc/mei/mei_lb.c
@@ -134,8 +134,7 @@ static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
return true;
}
-static int mei_lb_push_payload(struct device *dev,
- enum intel_lb_type type, u32 flags,
+static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags,
const void *payload, size_t payload_size)
{
struct mei_cl_device *cldev;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b108a7c22388..b017ff29dbd1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c9eb5c5393e4..06b55a891c6b 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -109,19 +109,19 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
+ err = mei_register(dev, &pdev->dev);
+ if (err)
+ goto release_irq;
+
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
- goto release_irq;
+ goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- err = mei_register(dev, &pdev->dev);
- if (err)
- goto stop;
-
pci_set_drvdata(pdev, dev);
/*
@@ -144,8 +144,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-stop:
- mei_stop(dev);
+deregister:
+ mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df51ee8db62..cc1d18b3df5c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
{
unsigned long status, flags;
struct vmballoon *b;
- int ret;
+ int ret = 0;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
- * change and report failure.
+ * change.
*/
atomic64_dec(&b->size);
- ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (!ret) {
+ if (status == VMW_BALLOON_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2c963cb6724b..10d0ef58ef49 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -950,7 +950,7 @@ config MMC_USHC
config MMC_WMT
tristate "Wondermedia SD/MMC Host Controller support"
depends on ARCH_VT8500 || COMPILE_TEST
- default y
+ default ARCH_VT8500
help
This selects support for the SD/MMC Host Controller on
Wondermedia WM8505/WM8650 based SoCs.
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 82dd906bb002..681354942e97 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -42,7 +42,7 @@ struct dw_mci_rockchip_priv_data {
*/
static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample)
{
- unsigned long rate = clk_get_rate(host->ciu_clk);
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
u32 raw_value;
u16 degrees;
u32 delay_num = 0;
@@ -85,7 +85,7 @@ static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample)
static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees)
{
- unsigned long rate = clk_get_rate(host->ciu_clk);
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
u8 nineties, remainder;
u8 delay_num;
u32 raw_value;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 26d03352af63..b5ea058ed467 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -652,10 +652,9 @@ static int pxamci_probe(struct platform_device *pdev)
host->clkrt = CLKRT_OFF;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk)) {
- host->clk = NULL;
- return PTR_ERR(host->clk);
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(dev, PTR_ERR(host->clk),
+ "Failed to acquire clock\n");
host->clkrate = clk_get_rate(host->clk);
@@ -703,46 +702,37 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(host->dma_chan_rx)) {
- host->dma_chan_rx = NULL;
+ host->dma_chan_rx = devm_dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx))
return dev_err_probe(dev, PTR_ERR(host->dma_chan_rx),
"unable to request rx dma channel\n");
- }
- host->dma_chan_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(host->dma_chan_tx)) {
- dev_err(dev, "unable to request tx dma channel\n");
- ret = PTR_ERR(host->dma_chan_tx);
- host->dma_chan_tx = NULL;
- goto out;
- }
+
+ host->dma_chan_tx = devm_dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx))
+ return dev_err_probe(dev, PTR_ERR(host->dma_chan_tx),
+ "unable to request tx dma channel\n");
if (host->pdata) {
host->detect_delay_ms = host->pdata->detect_delay_ms;
host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(host->power)) {
- ret = PTR_ERR(host->power);
- dev_err(dev, "Failed requesting gpio_power\n");
- goto out;
- }
+ if (IS_ERR(host->power))
+ return dev_err_probe(dev, PTR_ERR(host->power),
+ "Failed requesting gpio_power\n");
/* FIXME: should we pass detection delay to debounce? */
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_cd\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_cd\n");
if (!host->pdata->gpio_card_ro_invert)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_ro\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_ro\n");
+
if (!ret)
host->use_ro_gpio = true;
@@ -759,16 +749,8 @@ static int pxamci_probe(struct platform_device *pdev)
if (ret) {
if (host->pdata && host->pdata->exit)
host->pdata->exit(dev, mmc);
- goto out;
}
- return 0;
-
-out:
- if (host->dma_chan_rx)
- dma_release_channel(host->dma_chan_rx);
- if (host->dma_chan_tx)
- dma_release_channel(host->dma_chan_tx);
return ret;
}
@@ -791,8 +773,6 @@ static void pxamci_remove(struct platform_device *pdev)
dmaengine_terminate_all(host->dma_chan_rx);
dmaengine_terminate_all(host->dma_chan_tx);
- dma_release_channel(host->dma_chan_rx);
- dma_release_channel(host->dma_chan_tx);
}
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index eebd45389956..5b61401a7f3d 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -94,7 +94,7 @@
#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x4
#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
#define DLL_STRBIN_DELAY_NUM_OFFSET 16
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index cf5be9c449a5..10064d7b7249 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -929,6 +929,10 @@ static void release_mdev(struct device *dev)
{
struct most_dev *mdev = to_mdev_from_dev(dev);
+ kfree(mdev->busy_urbs);
+ kfree(mdev->cap);
+ kfree(mdev->conf);
+ kfree(mdev->ep_address);
kfree(mdev);
}
/**
@@ -1093,7 +1097,7 @@ err_free_cap:
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
- put_device(&mdev->dev);
+ kfree(mdev);
return ret;
}
@@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface)
if (mdev->dci)
device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
-
- kfree(mdev->busy_urbs);
- kfree(mdev->cap);
- kfree(mdev->conf);
- kfree(mdev->ep_address);
- put_device(&mdev->dci->dev);
- put_device(&mdev->dev);
}
static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8dc4f5c493fc..335c702633ff 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -599,6 +599,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -618,7 +619,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size)
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
@@ -698,6 +699,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
size_t datbuf_len, oobbuf_len;
size_t orig_len, orig_ooblen;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -724,7 +726,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size) {
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) {
ret = -EINVAL;
goto out;
}
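
The mtdchar changes above replace an additive bounds test that could wrap with check_add_overflow() plus an explicit end-of-range comparison. A small userspace equivalent, assuming GCC/Clang's __builtin_add_overflow (which the kernel helper wraps); the device size and sample requests are made up:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Returns true if [start, start + len) fits inside a device of `size`
 * bytes, rejecting requests where start + len wraps around.
 */
static bool range_ok(uint64_t start, uint64_t len, uint64_t size)
{
	uint64_t end;

	if (__builtin_add_overflow(start, len, &end))
		return false;
	return end <= size;
}

int main(void)
{
	uint64_t size = 1ULL << 30;	/* 1 GiB device */

	printf("%d\n", range_ok(0, size, size));             /* 1: fits   */
	printf("%d\n", range_ok(size - 16, 32, size));       /* 0: beyond */
	printf("%d\n", range_ok(UINT64_MAX - 8, 64, size));  /* 0: wraps  */
	return 0;
}
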
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4a17271076bc..1e57c8de8578 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -63,7 +63,7 @@ config MTD_NAND_ECC_MEDIATEK
config MTD_NAND_ECC_REALTEK
tristate "Realtek RTL93xx hardware ECC engine"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
depends on MACH_REALTEK_RTL || COMPILE_TEST
select MTD_NAND_ECC
help
diff --git a/drivers/mtd/nand/ecc-realtek.c b/drivers/mtd/nand/ecc-realtek.c
index 7d718934c909..0046da37ea3e 100644
--- a/drivers/mtd/nand/ecc-realtek.c
+++ b/drivers/mtd/nand/ecc-realtek.c
@@ -380,7 +380,7 @@ static void rtl_ecc_cleanup_ctx(struct nand_device *nand)
nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
}
-static struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
+static const struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
.init_ctx = rtl_ecc_init_ctx,
.cleanup_ctx = rtl_ecc_cleanup_ctx,
.prepare_io_req = rtl_ecc_prepare_io_req,
@@ -418,8 +418,8 @@ static int rtl_ecc_probe(struct platform_device *pdev)
rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma,
DMA_BIDIRECTIONAL, GFP_KERNEL);
- if (IS_ERR(rtlc->buf))
- return PTR_ERR(rtlc->buf);
+ if (!rtlc->buf)
+ return -ENOMEM;
rtlc->dev = dev;
rtlc->engine.dev = dev;
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index f37a6138e461..6d6aa709a21f 100644
--- a/drivers/mtd/nand/onenand/onenand_samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -906,7 +906,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, r->start,
s5pc110_onenand_irq,
IRQF_SHARED, "onenand",
- &onenand);
+ onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
return err;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 6667eea95597..32ed38b89394 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2871,7 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
- struct dma_device *dma_dev = cdns_ctrl->dmac->device;
+ struct dma_device *dma_dev;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2915,6 +2915,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
}
}
+ dma_dev = cdns_ctrl->dmac->device;
cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
cdns_ctrl->io.size,
DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/mtd/nand/spi/fmsh.c b/drivers/mtd/nand/spi/fmsh.c
index 8b2097bfc771..c2b9a8c113cb 100644
--- a/drivers/mtd/nand/spi/fmsh.c
+++ b/drivers/mtd/nand/spi/fmsh.c
@@ -58,7 +58,7 @@ static const struct spinand_info fmsh_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ 0,
SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)),
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4da619210c1f..5abef8a3b775 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2120,7 +2120,7 @@ skip_mac_set:
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (netif_carrier_ok(slave_dev)) {
+ if (netif_running(slave_dev) && netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2287,7 +2287,9 @@ skip_mac_set:
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
@@ -2463,7 +2465,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2662,7 +2665,8 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = netif_carrier_ok(slave->dev);
+ link_state = netif_running(slave->dev) &&
+ netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -2871,7 +2875,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2883,30 +2887,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2914,13 +2921,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
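
The bond_mii_monitor rework above folds two separate rtnl_trylock() sections into one: decide what work is pending while under RCU, then take the lock a single time and perform all of it. A rough userspace analogue of that shape, using a pthread mutex in place of RTNL (the work items themselves are placeholders):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Two pieces of deferred work that both need the same lock: take it
 * once, do whichever pieces are pending, release it once. Returns
 * false if the lock was busy so the caller can re-arm and retry.
 */
static bool run_deferred(bool commit, bool notify)
{
	if (!commit && !notify)
		return true;			/* nothing to do */

	if (pthread_mutex_trylock(&lock))
		return false;			/* busy: retry later */

	if (commit)
		puts("committing link state");
	if (notify)
		puts("notifying peers");

	pthread_mutex_unlock(&lock);
	return true;
}

int main(void)
{
	if (!run_deferred(true, true))
		puts("lock busy, retry later");
	return 0;
}
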
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 495a87f2ea7c..384499c869b8 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -225,13 +225,6 @@ static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ NULL, -1, 0},
};
-static const struct bond_opt_value bond_actor_port_prio_tbl[] = {
- { "minval", 0, BOND_VALFLAG_MIN},
- { "maxval", 65535, BOND_VALFLAG_MAX},
- { "default", 255, BOND_VALFLAG_DEFAULT},
- { NULL, -1, 0},
-};
-
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
@@ -497,7 +490,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_ACTOR_PORT_PRIO,
.name = "actor_port_prio",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
- .values = bond_actor_port_prio_tbl,
+ .flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_actor_port_prio_set,
},
[BOND_OPT_AD_ACTOR_SYSTEM] = {
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index bfc60eb33dc3..333ad42ea73b 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 0591406b6f32..6f83b87d54fc 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -452,7 +452,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
- if (!priv->do_set_mode) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
NL_SET_ERR_MSG(extack,
"Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
@@ -461,7 +463,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index c80032bc1a52..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 acc_id;
u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index 865a15e033a9..12200dcfd338 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int err;
u8 i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(priv->ndev, 0,
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 2f846381d5a7..eb767edc4c13 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -371,11 +371,11 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
* frames should be flooded or not.
*/
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
} else {
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_IP_MCAST_25;
+ mgmt |= B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
}
@@ -1372,6 +1372,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+ reg &= ~(0x3 << GMII_PO_SPEED_S);
+ if (is5301x(dev) || is58xx(dev))
+ reg &= ~PORT_OVERRIDE_SPEED_2000M;
+
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@@ -1390,6 +1394,11 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
+ if (is5325(dev))
+ reg &= ~PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
+
if (rx_pause) {
if (is5325(dev))
reg |= PORT_OVERRIDE_LP_FLOW_25;
@@ -1593,8 +1602,11 @@ static void b53_phylink_mac_link_down(struct phylink_config *config,
struct b53_device *dev = dp->ds->priv;
int port = dp->index;
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_force_link(dev, port, false);
return;
+ }
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@@ -1622,6 +1634,13 @@ static void b53_phylink_mac_link_up(struct phylink_config *config,
if (mode == MLO_AN_PHY) {
/* Re-negotiate EEE if it was enabled already */
p->eee_enabled = b53_eee_init(ds, port, phydev);
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ }
+
return;
}
@@ -2018,7 +2037,7 @@ static int b53_arl_search_wait(struct b53_device *dev)
do {
b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
if (!(reg & ARL_SRCH_STDN))
- return 0;
+ return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@@ -2068,13 +2087,16 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
- unsigned int count = 0;
u8 offset;
int ret;
u8 reg;
+ if (priv->num_arl_bins > 2)
+ results_per_hit = 2;
+
mutex_lock(&priv->arl_mutex);
if (is5325(priv) || is5365(priv))
@@ -2096,7 +2118,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
if (ret)
break;
- if (priv->num_arl_bins > 2) {
+ if (results_per_hit == 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
@@ -2106,7 +2128,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < b53_max_arl_entries(priv) / 2);
+ } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 309fe0e46dad..8ce1ce72e938 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -111,8 +111,7 @@
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
-#define B53_IP_MCAST_25 BIT(0)
-#define B53_IPMC_FWD_EN BIT(1)
+#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index d747ea1c41a7..5df8f153d511 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1355,9 +1355,15 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
+#define RESV_MCAST_CNT 8
+
+static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
+
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
+ u8 i, ports, update;
const u32 *masks;
+ bool override;
u32 data;
int ret;
@@ -1366,23 +1372,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
- /* Set the Override bit for forwarding BPDU packet to CPU */
- ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
- ALU_V_OVERRIDE | BIT(dev->cpu_port));
- if (ret < 0)
- return ret;
+ /* The reserved multicast address table has 8 entries. Each entry has
+ * a default value of which port to forward. It is assumed the host
+ * port is the last port in most of the switches, but that is not the
+ * case for KSZ9477 or maybe KSZ9897. For LAN937X family the default
+ * port is port 5, the first RGMII port. It is okay for LAN9370, a
+ * 5-port switch, but may not be correct for the other 8-port
+ * versions. It is necessary to update the whole table to forward to
+ * the right ports.
+ * Furthermore PTP messages can use a reserved multicast address and
+ * the host will not receive them if this table is not correct.
+ */
+ for (i = 0; i < RESV_MCAST_CNT; i++) {
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_READ];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
- data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
- ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
- if (ret < 0)
- return ret;
+ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
+ if (ret < 0)
+ return ret;
- /* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
- return ret;
+ override = false;
+ ports = data & dev->port_mask;
+ switch (i) {
+ case 0:
+ case 6:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ override = true;
+ break;
+ case 2:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ break;
+ case 4:
+ case 5:
+ case 7:
+ /* Skip the host port. */
+ update = dev->port_mask & ~BIT(dev->cpu_port);
+ break;
+ default:
+ update = ports;
+ break;
+ }
+ if (update != ports || override) {
+ data &= ~dev->port_mask;
+ data |= update;
+ /* Set Override bit to receive frame even when port is
+ * closed.
+ */
+ if (override)
+ data |= ALU_V_OVERRIDE;
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
+ if (ret < 0)
+ return ret;
+
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_WRITE];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
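
The reserved-multicast rework above boils down to a read-compare-write loop: fetch each table entry, work out which ports it should forward to for that entry index, and only write it back when that differs from what the hardware already holds. A toy userspace model of that loop; the port count, host port, initial table contents and entry classification here are made up, and the ALU override bit is left out:

#include <stdio.h>

#define NUM_ENTRIES 8
#define PORT_MASK   0x7f		/* 7 ports */
#define HOST_PORT   6

static unsigned int table[NUM_ENTRIES] = {
	0x20, 0x20, 0x20, 0x7f, 0x7f, 0x7f, 0x20, 0x7f
};

int main(void)
{
	for (int i = 0; i < NUM_ENTRIES; i++) {
		unsigned int ports = table[i] & PORT_MASK;
		unsigned int update;

		switch (i) {
		case 0: case 2: case 6:
			update = 1u << HOST_PORT;		 /* host only */
			break;
		case 4: case 5: case 7:
			update = PORT_MASK & ~(1u << HOST_PORT); /* skip host */
			break;
		default:
			update = ports;				 /* leave as-is */
			break;
		}

		if (update != ports) {
			table[i] = (table[i] & ~PORT_MASK) | update;
			printf("entry %d rewritten to 0x%02x\n", i, table[i]);
		}
	}
	return 0;
}
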
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index ff579920078e..61ea11e3338e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -397,7 +397,6 @@
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
-#define ALU_RESV_MCAST_ADDR BIT(1)
#define REG_SW_ALU_VAL_A 0x0420
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a962055bfdbd..933ae8dc6337 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -808,6 +808,8 @@ static const u16 ksz9477_regs[] = {
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
+ [ALU_STAT_DIRECT] = 0,
+ [ALU_RESV_MCAST_ADDR] = BIT(1),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -835,6 +837,8 @@ static const u8 ksz9477_xmii_ctrl1[] = {
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
+ [ALU_STAT_DIRECT] = BIT(3),
+ [ALU_RESV_MCAST_ADDR] = BIT(2),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a1eb39771bb9..c65188cd3c0a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -294,6 +294,8 @@ enum ksz_masks {
DYNAMIC_MAC_TABLE_TIMESTAMP,
ALU_STAT_WRITE,
ALU_STAT_READ,
+ ALU_STAT_DIRECT,
+ ALU_RESV_MCAST_ADDR,
P_MII_TX_FLOW_CTRL,
P_MII_RX_FLOW_CTRL,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3fc33b1b4dfb..a625e7c311dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12439,7 +12439,7 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -16892,6 +16892,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
netif_close(dev);
+ if (bnxt_hwrm_func_drv_unrgtr(bp)) {
+ pcie_flr(pdev);
+ goto shutdown_exit;
+ }
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 741b2d854789..3613a172483a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2149,7 +2149,7 @@ struct bnxt_bs_trace_info {
static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
u32 offset)
{
- if (!bs_trace->wrapped &&
+ if (!bs_trace->wrapped && bs_trace->magic_byte &&
*bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
bs_trace->wrapped = 1;
bs_trace->last_offset = offset;
@@ -2941,6 +2941,7 @@ void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 0181ab1f2dfd..ccb8b509662d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -333,13 +333,14 @@ static void bnxt_fill_drv_seg_record(struct bnxt *bp,
u32 offset = 0;
int rc = 0;
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+
rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
if (rc)
return;
bnxt_bs_trace_check_wrap(bs_trace, offset);
- record->max_entries = cpu_to_le32(ctxm->max_entries);
- record->entry_size = cpu_to_le32(ctxm->entry_size);
record->offset = cpu_to_le32(bs_trace->last_offset);
record->wrapped = bs_trace->wrapped;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 02961d93ed35..67ca02d84c97 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -461,7 +461,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
rtnl_unlock();
break;
}
- bnxt_cancel_reservations(bp, false);
+ bnxt_clear_reservations(bp, false);
bnxt_free_ctx_mem(bp, false);
break;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index db81cf6d5289..0abaa2bbe357 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -1051,9 +1051,9 @@ static void bnxt_ptp_free(struct bnxt *bp)
if (ptp->ptp_clock) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
}
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
}
int bnxt_ptp_init(struct bnxt *bp)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index ecd9a0bd5e18..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7077d705e471..6e4f17142519 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -733,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c96d1d6ba8fe..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index aae462a0cf5a..0535e92404e3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1630,7 +1632,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1638,6 +1642,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1947,6 +1953,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
struct xdp_buff xdp_buff;
@@ -2010,7 +2018,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
*/
enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2045,7 +2055,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
@@ -2065,8 +2077,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -2075,6 +2090,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -2093,6 +2110,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -2104,10 +2122,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -2116,6 +2132,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 0ec010a7d640..f279fa597991 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -76,7 +76,7 @@ struct enetc_lso_t {
#define ENETC_LSO_MAX_DATA_LEN SZ_256K
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1edcfaee6819..3222359ac15b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1835,6 +1835,8 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index e96247c9d68d..a384a9ed4914 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -26,6 +26,19 @@ int gve_clock_nic_ts_read(struct gve_priv *priv)
return 0;
}
+static int gve_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int gve_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
{
const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
@@ -47,6 +60,8 @@ out:
static const struct ptp_clock_info gve_ptp_caps = {
.owner = THIS_MODULE,
.name = "gve clock",
+ .gettimex64 = gve_ptp_gettimex64,
+ .settime64 = gve_ptp_settime64,
.do_aux_work = gve_ptp_do_aux_work,
};
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..38875c196cb6 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,6 +148,7 @@ config HIBMCGE
tristate "Hisilicon BMC Gigabit Ethernet Device Support"
depends on PCI && PCI_MSI
select PHYLIB
+ select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
help
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index ea09a09c451b..2097e4c2b3d7 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -17,6 +17,7 @@
#define HBG_PCU_CACHE_LINE_SIZE 32
#define HBG_TX_TIMEOUT_BUF_LEN 1024
#define HBG_RX_DESCR 0x01
+#define HBG_NO_PHY 0xFF
#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 83cf75bf7a17..e11495b7ee98 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
return PCI_ERS_RESULT_DISCONNECT;
+ }
- pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"failed to re-enable PCI device after reset\n");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index d0aa0661ecd4..d6e8ce8e351a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
/* wait MAC link up */
ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
link_status,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 8af0bc4cca21..ae4cb35186d8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
const struct hbg_irq_info *irq_info)
{
priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
}
#define HBG_IRQ_I(name, handle) \
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 37791de47f6f..b6f0a2780ea8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -20,7 +20,6 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
-#define HBG_NO_PHY 0xFF
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 9d34d28ff168..782bb48c9f3d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 96553109f44c..cf881108fa57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;
@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }
- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 4200d0b6d931..21d434c82475 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
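
The hclge_read_phy_reg change above is the common fix for an API that overloaded its return value: report success or a negative errno through the return code and hand the data back via an out parameter, so callers can tell a failure apart from a register that legitimately reads as 0xFFFF. A standalone sketch of that calling convention (the register map here is fake):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Returns 0 and fills *val on success, or a negative errno on failure. */
static int read_reg(uint16_t addr, uint16_t *val)
{
	if (addr > 0x1f)			/* pretend these addresses fail */
		return -EIO;
	*val = (uint16_t)(0xA000 | addr);	/* fake register contents */
	return 0;
}

int main(void)
{
	uint16_t v;
	int ret = read_reg(0x10, &v);

	if (ret)
		fprintf(stderr, "read failed: %d\n", ret);
	else
		printf("reg = 0x%04x\n", v);
	return 0;
}
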
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index a563a94e2780..122ee23497e6 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -146,7 +146,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select LIBIE_FWLOG
+ select LIBIE_FWLOG if DEBUG_FS
select MDIO
select NET_DEVLINK
select PLDMFW
@@ -298,7 +298,7 @@ config ICE
select DIMLIB
select LIBIE
select LIBIE_ADMINQ
- select LIBIE_FWLOG
+ select LIBIE_FWLOG if DEBUG_FS
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2250426ec91b..2532b6f82e97 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4382,6 +4382,15 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
unsigned int lane;
int err;
+ /* E82X does not have sequential IDs, lane number is PF ID.
+ * For E825 device, the exception is the variant with external
+ * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
@@ -6497,6 +6506,28 @@ u32 ice_get_link_speed(u16 index)
}
/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+ /* On dual complex E825 only complex 0 has functional CGU powering all
+ * the PHYs.
+ * SBQ destination device cgu points to CGU on a current complex and to
+ * access primary CGU from the secondary complex, the driver should use
+ * cgu_peer as a destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
* ice_read_cgu_reg - Read a CGU register
* @hw: Pointer to the HW struct
* @addr: Register address to read
@@ -6510,8 +6541,8 @@ u32 ice_get_link_speed(u16 index)
int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -6542,8 +6573,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
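
The CGU routing above reduces to a pure function of three facts about the adapter. A standalone model with hypothetical stand-in fields (the real driver derives them from struct ice_hw):

#include <stdbool.h>
#include <stdio.h>

enum sbq_dev { SBQ_DEV_CGU = 0x06, SBQ_DEV_CGU_PEER = 0x0F };

struct hw_model {
	bool is_e825_3k;	/* mac type with the dual-complex layout */
	bool is_dual;		/* two complexes present */
	bool is_primary;	/* running on complex 0 */
};

/* Only complex 0 hosts the functional CGU, so the secondary complex must
 * address it through the "peer" SBQ device id.
 */
static enum sbq_dev dest_cgu(const struct hw_model *hw)
{
	if (hw->is_e825_3k && hw->is_dual && !hw->is_primary)
		return SBQ_DEV_CGU_PEER;
	return SBQ_DEV_CGU;
}

int main(void)
{
	struct hw_model secondary = { true, true, false };
	struct hw_model primary = { true, true, true };

	printf("secondary -> 0x%02x, primary -> 0x%02x\n",
	       dest_cgu(&secondary), dest_cgu(&primary));
	return 0;
}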
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 363ae79a3620..013c93b6605e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1479,7 +1479,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 183dd5457d6a..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -50,6 +50,7 @@ enum ice_sbq_dev_id {
ice_sbq_dev_phy_0 = 0x02,
ice_sbq_dev_cgu = 0x06,
ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f8a208c84f15..10e2445e0ded 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f3e7218ba6f3..bb783042d1af 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);
+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 14d275270123..dce4936708eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -821,9 +821,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
-#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
/* Bitmask indicating in use pools */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ca1ccc630001..3190ce7e44c7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
shutdown_aci:
mutex_destroy(&adapter->hw.aci.lock);
ixgbe_release_hw_control(adapter);
- devlink_free(adapter->devlink);
clean_up_probe:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
if (disable_dev)
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 114dd88fc71c..6885d2343c48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index aff17c37ddde..902d6abaa3ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1516,10 +1516,8 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->xdp_cnt = numptrs;
pool->xdp = devm_kcalloc(pfvf->dev,
numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
- if (IS_ERR(pool->xdp)) {
- netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
- return PTR_ERR(pool->xdp);
- }
+ if (!pool->xdp)
+ return -ENOMEM;
}
return 0;
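
The octeontx2 fix above is the standard allocation-check idiom: devm_kcalloc() signals failure with NULL, never an ERR_PTR, so the old IS_ERR()/PTR_ERR() pair treated a failed allocation as success. A tiny sketch of the correct pattern, with plain calloc() standing in for the devm allocator:

#include <stdio.h>
#include <stdlib.h>

struct xdp_ring { void **bufs; size_t n; };

/* NULL-returning allocators want a plain NULL test, not IS_ERR(). */
static int ring_init(struct xdp_ring *r, size_t n)
{
	r->bufs = calloc(n, sizeof(*r->bufs));
	if (!r->bufs)
		return -12;	/* -ENOMEM */
	r->n = n;
	return 0;
}

int main(void)
{
	struct xdp_ring r;

	if (ring_init(&r, 1024)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("allocated %zu slots\n", r.n);
	free(r.bufs);
	return 0;
}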
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index e9f319a9bdd6..60f7ab1d72e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -66,8 +66,8 @@ void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
- struct mlx5_eqe *eqe)
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -95,7 +95,15 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
if (schedule_tasklet)
tasklet_schedule(&tasklet_ctx->task);
}
+EXPORT_SYMBOL(mlx5_add_cq_to_tasklet);
+static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
+{
+ mlx5_core_err(cq->eq->core.dev,
+ "CQ default completion callback, CQ #%u\n", cq->cqn);
+}
+
+#define MLX5_CQ_INIT_CMD_SN cpu_to_be32(2 << 28)
/* Callers must verify outbox status in case of err */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
@@ -121,10 +129,19 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
cq->eq = eq;
cq->uid = MLX5_GET(create_cq_in, in, uid);
+
+ /* Kernel CQs must set the arm_db address prior to calling
+ * this function, allowing for the proper value to be
+ * initialized. User CQs are responsible for their own
+ * initialization since they do not use the arm_db field.
+ */
+ if (cq->arm_db)
+ *cq->arm_db = MLX5_CQ_INIT_CMD_SN;
+
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
- cq->comp = mlx5_add_cq_to_tasklet;
+ cq->comp = mlx5_core_cq_dummy_cb;
/* assuming CQ will be deleted before the EQ */
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
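
The comment added to mlx5_create_cq() above shifts responsibility for the initial arm sequence number to kernel callers: they point cq->arm_db at their doorbell record before the create call, and the helper then writes the non-zero init value (2 << 28) so polled CQs are not flagged by hardware doorbell recovery. A minimal model of that ordering, with invented names standing in for the mlx5 structures:

#include <stdint.h>
#include <stdio.h>

#define CQ_INIT_CMD_SN	((uint32_t)2 << 28)	/* non-zero => skip db recovery */

struct model_cq {
	uint32_t *set_ci_db;
	uint32_t *arm_db;	/* may stay NULL for user-owned CQs */
};

/* Models mlx5_create_cq(): only touches arm_db if the caller wired it up. */
static void model_create_cq(struct model_cq *cq)
{
	if (cq->arm_db)
		*cq->arm_db = CQ_INIT_CMD_SN;
}

int main(void)
{
	uint32_t doorbell[2] = { 0, 0 };
	struct model_cq cq = {
		/* caller sets both pointers *before* the create call */
		.set_ci_db = &doorbell[0],
		.arm_db = &doorbell[1],
	};

	*cq.set_ci_db = 0;
	model_create_cq(&cq);
	printf("arm_db = 0x%08x\n", (unsigned int)*cq.arm_db);	/* 0x20000000 */
	return 0;
}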
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index fceea83abbd7..887adf4807d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -541,7 +541,7 @@ static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
max_num_channels = mlx5e_get_max_num_channels(mdev);
if (val32 > max_num_channels) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Requested num_doorbells (%u) exceeds maximum number of channels (%u)",
+ "Requested num_doorbells (%u) exceeds max number of channels (%u)",
val32, max_num_channels);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 14e3207b14e7..a163f81f07c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -634,7 +634,10 @@ struct mlx5e_dma_info {
struct mlx5e_shampo_hd {
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
+ u32 hd_per_page;
u16 hd_per_wqe;
+ u8 log_hd_per_page;
+ u8 log_hd_entry_size;
unsigned long *bitmap;
u16 pi;
u16 ci;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3692298e10f2..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -100,7 +100,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 0a4fb8c92268..35d9530037a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -804,7 +804,8 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
goto err_xfrm;
}
- if (mlx5_eswitch_block_mode(priv->mdev))
+ err = mlx5_eswitch_block_mode(priv->mdev);
+ if (err)
goto unblock_ipsec;
if (x->props.mode == XFRM_MODE_TUNNEL &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 5d7c15abfcaf..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -342,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -387,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index bf1d2769d4f1..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2893,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index d7a11ff9bbdb..da2d1eb52c13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }
dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)
resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ return false;
+ }
return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
@@ -527,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
seq = th->seq;
datalen = skb->len - depth;
- tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+ tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
rq->stats->tls_resync_req_start++;
unref:
@@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
resync_handle_seq_match(priv_rx, c);
}
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct mlx5e_ktls_rx_resync_buf *buf;
+
+ buf = wi->tls_get_params.buf;
+ priv_rx = buf->priv_rx;
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
+}
+
/* End of resync section */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
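
The ktls_rx changes above make every resync request that was started either end normally or get explicitly cancelled; previously the failure paths bumped a stats counter but left the async request pending. A tiny standalone model of the invariant (illustrative names only, not the real TLS core API):

#include <stdio.h>

struct resync_model { int pending; int ended; int cancelled; };

static void request_start(struct resync_model *r)  { r->pending++; }
static void request_end(struct resync_model *r)    { r->pending--; r->ended++; }
static void request_cancel(struct resync_model *r) { r->pending--; r->cancelled++; }

/* Every start must be balanced by exactly one end or cancel, otherwise the
 * TLS core keeps waiting for a resync completion that will never arrive.
 */
static void handle_one(struct resync_model *r, int hw_error)
{
	request_start(r);
	if (hw_error)
		request_cancel(r);	/* the paths patched above */
	else
		request_end(r);
}

int main(void)
{
	struct resync_model r = { 0, 0, 0 };

	handle_one(&r, 0);
	handle_one(&r, 1);
	printf("pending=%d ended=%d cancelled=%d\n",
	       r.pending, r.ended, r.cancelled);
	return 0;
}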
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index f87b65c560ea..cb08799769ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
+
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d166c0d5189e..9b93da4d52f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -595,32 +595,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+ __u64 upper_limit_mbps;
+ __u64 upper_limit_gbps;
int i;
+ struct {
+ int scale;
+ const char *units_str;
+ } units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+ };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
+ upper_limit_mbps = 255 * MLX5E_100MB;
+ upper_limit_gbps = 255 * MLX5E_1GB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+ if (maxrate->tc_maxrate[i] <= upper_limit_mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else {
+ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
+ } else {
+ netdev_err(netdev,
+ "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
+ i, maxrate->tc_maxrate[i],
+ upper_limit_gbps);
+ return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
- __func__, i, max_bw_value[i]);
+ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
+ max_bw_value[i] * units[max_bw_unit[i]].scale,
+ units[max_bw_unit[i]].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
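
The setmaxrate rework above picks the unit from the requested rate (tc_maxrate arrives in Kbps). Assuming MLX5E_100MB and MLX5E_1GB are 100000 and 1000000 Kbps, the two 8-bit limits work out to 25.5 Gbps and 255 Gbps: rates up to 25.5 Gbps are encoded in 100 Mbps units, up to 255 Gbps in 1 Gbps units, and anything larger is now rejected instead of silently mis-encoded. A standalone sketch of that selection under those assumed unit sizes:

#include <stdint.h>
#include <stdio.h>

/* Assumed unit sizes, in Kbps (mirroring the driver's MLX5E_100MB/MLX5E_1GB). */
#define UNIT_100MB	100000ULL
#define UNIT_1GB	1000000ULL

/* The 8-bit hardware field holds at most 255 units of either size. */
static int encode_maxrate(uint64_t rate_kbps, uint8_t *value, const char **unit)
{
	if (rate_kbps <= 255 * UNIT_100MB) {
		uint64_t v = rate_kbps / UNIT_100MB;

		*value = v ? v : 1;	/* round tiny rates up to one unit */
		*unit = "x 100 Mbps";
	} else if (rate_kbps <= 255 * UNIT_1GB) {
		*value = rate_kbps / UNIT_1GB;
		*unit = "x 1 Gbps";
	} else {
		return -1;	/* above 255 Gbps: reject, as the patch now does */
	}
	return 0;
}

int main(void)
{
	uint64_t samples[] = { 2500000, 40000000, 300000000 };	/* Kbps */

	for (int i = 0; i < 3; i++) {
		uint8_t value;
		const char *unit;

		if (encode_maxrate(samples[i], &value, &unit))
			printf("%llu Kbps: rejected\n",
			       (unsigned long long)samples[i]);
		else
			printf("%llu Kbps: %u %s\n",
			       (unsigned long long)samples[i], value, unit);
	}
	return 0;
}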
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 53e5ae252eac..893e1380a7c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2125,14 +2125,12 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (!size_read)
return i;
- if (size_read == -EINVAL)
- return -EINVAL;
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
"Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
- return i;
+ return size_read;
}
i += size_read;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a56825921c23..5e17eae81f4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -242,8 +242,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
&attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -256,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -266,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -790,8 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u8 log_hd_per_page, log_hd_entry_size;
+ u16 hd_per_wq, hd_per_wqe;
u32 hd_pool_size;
- u16 hd_per_wq;
int wq_size;
int err;
@@ -814,11 +816,24 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
if (err)
goto err_umr_mkey;
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
+ log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ } else {
+ log_hd_per_page = order_base_2(hd_per_wqe);
+ log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ }
+
+ rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
+ rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
+ rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
+ rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
+
+ hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */
@@ -2204,7 +2219,6 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
@@ -6120,6 +6134,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
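
The SHAMPO rework above derives two per-RQ log sizes at setup time so the RX path can map a header index to a page and an in-page offset with shifts and masks instead of compile-time constants. As a worked example under assumed values: with 4 KiB pages and hd_per_wqe = 16 (fewer than fit in one page at the maximum entry size), the else branch gives log_hd_per_page = 4 and log_hd_entry_size = log2(4096 / 16) = 8, so headers sit 256 bytes apart and index 21 maps to page 21 >> 4 = 1 at offset (21 & 15) << 8 = 0x500. A standalone sketch of that math; the per-page and max-entry constants passed in are hypothetical, and power-of-two counts are assumed so a ctz stands in for order_base_2():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MODEL	4096u	/* assumed 4 KiB pages for the example */

struct shampo_layout {
	uint8_t log_hd_per_page;
	uint8_t log_hd_entry_size;
};

/* Mirrors the setup logic: a WQE with fewer headers than a full page spreads
 * them out with a correspondingly larger per-header stride.
 */
static struct shampo_layout layout_for(unsigned int hd_per_wqe,
				       unsigned int max_hd_per_page,
				       unsigned int log_max_entry)
{
	struct shampo_layout l;

	if (hd_per_wqe >= max_hd_per_page) {
		l.log_hd_per_page = __builtin_ctz(max_hd_per_page);
		l.log_hd_entry_size = log_max_entry;
	} else {
		l.log_hd_per_page = __builtin_ctz(hd_per_wqe);
		l.log_hd_entry_size = __builtin_ctz(PAGE_SIZE_MODEL / hd_per_wqe);
	}
	return l;
}

int main(void)
{
	/* Hypothetical: 16 headers per WQE, up to 64 max-size headers per page. */
	struct shampo_layout l = layout_for(16, 64, 6);
	unsigned int index = 21;

	printf("page %u, offset 0x%x (entry size %u B)\n",
	       index >> l.log_hd_per_page,
	       (index & ((1u << l.log_hd_per_page) - 1)) << l.log_hd_entry_size,
	       1u << l.log_hd_entry_size);
	return 0;
}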
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 263d5628ee44..687cf123211d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -648,17 +648,20 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
+ int header_index)
{
- BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
+ return &shampo->pages[header_index >> shampo->log_hd_per_page];
}
-static u64 mlx5e_shampo_hd_offset(int header_index)
+static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
{
- return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 hd_per_page = shampo->hd_per_page;
+
+ return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
}
static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
@@ -671,7 +674,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom, i = 0;
+ int headroom, i;
headroom = rq->buff.headroom;
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
@@ -679,25 +682,24 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
- WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
- while (i < ksm_entries) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ for (i = 0; i < ksm_entries; i++, index++) {
+ struct mlx5e_frag_page *frag_page;
u64 addr;
- err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
- if (unlikely(err))
- goto err_unmap;
+ frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
+ frag_page);
+ if (err)
+ goto err_unmap;
+ }
addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
-
- for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
- header_offset = mlx5e_shampo_hd_offset(index++);
-
- umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
- .key = cpu_to_be32(lkey),
- .va = cpu_to_be64(addr + header_offset + headroom),
- };
- }
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(addr + header_offset + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
@@ -713,9 +715,9 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
return 0;
err_unmap:
- while (--i) {
+ while (--i >= 0) {
--index;
- header_offset = mlx5e_shampo_hd_offset(index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
if (!header_offset) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
@@ -735,12 +737,11 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_ksm_entries, len;
- max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev),
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
if (!ksm_entries)
return 0;
@@ -858,7 +859,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
@@ -1036,6 +1037,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+#endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -1221,9 +1226,10 @@ static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ void *addr = netmem_address(frag_page->netmem);
- return netmem_address(frag_page->netmem) + head_offset;
+ return addr + head_offset + rq->buff.headroom;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1794,14 +1800,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
+
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
skb = mlx5e_build_linear_skb(
@@ -2027,6 +2046,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2080,7 +2100,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
@@ -2096,10 +2117,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
@@ -2110,9 +2136,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
skb = mlx5e_build_linear_skb(
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
- mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
@@ -2137,8 +2173,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
@@ -2230,7 +2269,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
@@ -2246,7 +2286,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
- if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
@@ -2319,7 +2359,10 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
+ return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ else
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
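
The mlx5e_hw_gro_skb_has_enough_space() change above matters on large-page systems: with the old check, a single 64 KiB page per fragment pushes PAGE_SIZE * nr_frags past GRO_LEGACY_MAX_SIZE (64 KiB) after the very first fragment, even though the SKB holds far fewer bytes, so HW GRO sessions were cut short. A quick standalone comparison under those assumed sizes:

#include <stdbool.h>
#include <stdio.h>

#define GRO_MAX	65536u	/* GRO_LEGACY_MAX_SIZE */

static bool old_check(unsigned int page_size, int nr_frags, unsigned int data_bcnt)
{
	return page_size * nr_frags + data_bcnt <= GRO_MAX;
}

static bool new_check(unsigned int page_size, int nr_frags,
		      unsigned int skb_len, unsigned int data_bcnt)
{
	if (page_size >= GRO_MAX)
		return skb_len + data_bcnt <= GRO_MAX;	/* count real bytes */
	return page_size * nr_frags + data_bcnt <= GRO_MAX;
}

int main(void)
{
	/* 64 KiB pages, one fragment holding 9000 B so far, 1500 B more to add. */
	printf("old: %d, new: %d\n",
	       old_check(65536, 1, 1500),
	       new_check(65536, 1, 9000, 1500));
	return 0;
}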
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7c029a7d0fd7..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1614,7 +1614,9 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, mode, fec_stats);
- fec_set_histograms_stats(priv, mode, hist);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..2702b3885f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -256,7 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
@@ -982,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -992,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1009,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 76382626ad41..929adeb50a98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4cf995be127d..44a142a041b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -3129,7 +3128,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3140,7 +3139,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index cb1319974f83..ccef64fb40b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -421,6 +421,13 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
__be64 *pas;
u32 i;
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+
cq_size = roundup_pow_of_two(cq_size);
MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
@@ -468,15 +475,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
if (err)
goto err_cqwq;
- conn->cq.mcq.cqe_sz = 64;
- conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
- conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
- *conn->cq.mcq.set_ci_db = 0;
- *conn->cq.mcq.arm_db = 0;
- conn->cq.mcq.vector = 0;
- conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
-
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 59c00c911275..3db0387bf6dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1430,11 +1430,10 @@ static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc,
MLX5_DEVCOM_HCA_PORTS,
&attr, NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ if (!dev->priv.hca_devcom_comp) {
mlx5_core_err(dev,
- "Failed to register devcom HCA component, err: %ld\n",
- PTR_ERR(dev->priv.hca_devcom_comp));
- return PTR_ERR(dev->priv.hca_devcom_comp);
+ "Failed to register devcom HCA component.");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d0ba83d77cd1..29e7fa09c32c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -1444,7 +1444,7 @@ static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
compd = mlx5_devcom_register_component(mdev->priv.devc,
MLX5_DEVCOM_SHARED_CLOCK,
&attr, NULL, mdev);
- if (IS_ERR(compd))
+ if (!compd)
return;
mdev->clock_state->compdev = compd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index faa2833602c8..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -76,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -110,8 +108,10 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
@@ -122,7 +122,7 @@ mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
comp->key.key = attr->key;
@@ -160,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -240,31 +240,28 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
comp = mlx5_devcom_comp_alloc(id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -276,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -296,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -338,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -348,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -421,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
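
The devcom conversion above drops the ERR_PTR()/IS_ERR() convention in favour of plain NULL returns, which is why the lag, clock, sd, en_main and eswitch callers in this series now only test for NULL. A minimal sketch of the new calling style, with an invented register function standing in for mlx5_devcom_register_component():

#include <stdio.h>
#include <stdlib.h>

struct comp_dev { int id; };

/* New-style helper: NULL means "not available"; no errno is encoded in the pointer. */
static struct comp_dev *register_component(int id, int simulate_failure)
{
	if (simulate_failure)
		return NULL;

	struct comp_dev *d = malloc(sizeof(*d));

	if (d)
		d->id = id;
	return d;
}

int main(void)
{
	struct comp_dev *d = register_component(7, 0);

	if (!d) {		/* the only check callers need now */
		fprintf(stderr, "devcom registration failed\n");
		return 1;
	}
	printf("registered component %d\n", d->id);
	free(d);
	return 0;
}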
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f5c2701f6e87..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -221,8 +221,8 @@ static int sd_register(struct mlx5_core_dev *dev)
attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
&attr, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index df93625c9dfa..70c156591b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -978,9 +978,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %pe\n",
- dev->priv.devc);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 24ef7d66fa8a..7510c46e58a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -873,12 +873,6 @@ err_free_sqc:
return err;
}
-static void hws_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
int numa_node,
struct mlx5hws_send_engine *queue,
@@ -901,7 +895,6 @@ static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
- mcq->comp = hws_cq_complete;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 077a77fde670..d034372fa047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1049,12 +1049,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -1089,6 +1083,13 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
}
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ cq->mcq.vector = 0;
+ cq->mdev = mdev;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -1112,27 +1113,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.comp = dr_cq_complete;
-
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
goto err_cqwq;
- cq->mcq.cqe_sz = 64;
- cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
- cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
- *cq->mcq.set_ci_db = 0;
-
- /* set no-zero value, in order to avoid the HW to run db-recovery on
- * CQ that used in polling mode.
- */
- *cq->mcq.arm_db = cpu_to_be32(2 << 28);
-
- cq->mcq.vector = 0;
- cq->mdev = mdev;
-
return cq;
err_cqwq:
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 2474dfd330f4..fe4e61405284 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
@@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
}
}
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
@@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
@@ -416,7 +416,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
@@ -442,7 +442,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
@@ -500,7 +500,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
@@ -603,7 +603,7 @@ void lan966x_stats_get(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
@@ -685,7 +685,7 @@ void lan966x_stats_get(struct net_device *dev,
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
@@ -701,7 +701,7 @@ int lan966x_stats_init(struct lan966x *lan966x)
return -ENOMEM;
/* Init stats worker */
- mutex_init(&lan966x->stats_lock);
+ spin_lock_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 7001584f1b7a..47752d3fde0b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1261,7 +1261,6 @@ cleanup_ports:
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
debugfs_remove_recursive(lan966x->debugfs_root);
@@ -1279,7 +1278,6 @@ static void lan966x_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 4f75f0688369..eea286c29474 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -295,8 +295,8 @@ struct lan966x {
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
- /* workqueue for reading stats */
- struct mutex stats_lock;
+ /* lock for reading stats */
+ spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a1471e38d118..2a37fc1ba4bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
u32 counter;
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
if (counter)
admin->cache.counter = counter;
}
@@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
{
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
lan_wr(admin->cache.counter, lan966x,
SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static void lan966x_vcap_cache_write(struct net_device *dev,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 132626a3f9f7..9ef72f294117 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
return nn;
+err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d10b58ebf603..301ebee2fdc5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -29,6 +29,10 @@ static void ionic_tx_clean(struct ionic_queue *q,
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
+ /* Ensure TX descriptor writes reach memory before NIC reads them.
+ * Prevents device from fetching stale descriptors.
+ */
+ dma_wmb();
ionic_q_post(q, ring_dbell);
}
@@ -1444,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap;
int err;
- desc_info = &q->tx_info[q->head_idx];
-
- if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
- return -EIO;
-
- len = skb->len;
- mss = skb_shinfo(skb)->gso_size;
- outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPXIP4 |
- SKB_GSO_IPXIP6 |
- SKB_GSO_UDP_TUNNEL |
- SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
@@ -1470,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (unlikely(err)) {
- /* clean up mapping from ionic_tx_map_skb */
- ionic_tx_desc_unmap_bufs(q, desc_info);
+ if (unlikely(err))
return err;
- }
+ desc_info = &q->tx_info[q->head_idx];
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
+ len = skb->len;
+ mss = skb_shinfo(skb)->gso_size;
+ outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM));
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else
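
Two orderings change in ionic above: descriptor writes are published with dma_wmb() before the doorbell, and ionic_tx_tso() now runs the checksum preparation (which can fail) before DMA-mapping the skb, so the error path no longer needs to unmap anything. A generic standalone sketch of the "fallible work first, resource acquisition last" shape, with hypothetical names:

#include <stdio.h>

static int prepare_checksums(int should_fail)
{
	return should_fail ? -1 : 0;	/* the step that can fail */
}

static int map_buffers(void)
{
	puts("buffers mapped");		/* the step that would need cleanup */
	return 0;
}

/* Ordering used after the patch: fail early while nothing is held yet. */
static int send_one(int csum_fails)
{
	if (prepare_checksums(csum_fails))
		return -1;		/* nothing to unmap here */
	if (map_buffers())
		return -1;
	puts("descriptor posted");	/* dma_wmb() would go before the doorbell */
	return 0;
}

int main(void)
{
	send_one(1);	/* fails cleanly, no mapping was created */
	send_one(0);
	return 0;
}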
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9d3bd65b85ff..e2d7ce1a85e8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2211,15 +2211,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND needs to get written
+ * before DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+ * things until the doorbell is rung again.
+ * This is in replacement of the read operation mentioned in the HW
+ * manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
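The two barriers above implement a general rule for multi-descriptor TX rings: everything the hardware may dereference is published before the field that makes the chain visible, and everything is visible before the doorbell. A minimal sketch of that ordering, using hypothetical descriptor and register names rather than the ravb definitions:

/* Sketch only: placeholder types and values, not the ravb ones. */
#include <linux/io.h>
#include <linux/types.h>

enum ex_desc_type { EX_DT_FSTART = 1, EX_DT_FEND = 2 };	/* placeholder values */

struct ex_desc {
	__le32 dptr;
	__le16 ds;
	u8 die_dt;	/* descriptor type field polled by the hardware */
};

static void ex_publish_frame(struct ex_desc *first, struct ex_desc *last,
			     void __iomem *doorbell)
{
	last->die_dt = EX_DT_FEND;

	/* Hardware walking the ring must never observe a start-of-frame
	 * descriptor whose matching end-of-frame is not yet written.
	 */
	dma_wmb();
	first->die_dt = EX_DT_FSTART;

	/* All descriptor writes must be visible before the doorbell write
	 * that tells the DMA engine to (re)start fetching.
	 */
	dma_wmb();
	writel(1, doorbell);
}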
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 6fd0c1e9a7d5..7cfd9000f79d 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}
+/*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{
@@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
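The comment added above spells out an ownership rule: efx_mae_process_mport() disposes of @desc on every path, including the -EEXIST error, so no caller ever needs a conditional kfree(). A small userspace sketch of the same convention, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct ex_desc { int id; };

static struct ex_desc *ex_registry;	/* stand-in for the driver's table */

/* Takes ownership of @d, even if it returns an error. */
static int ex_process_desc(struct ex_desc *d)
{
	if (ex_registry) {
		fprintf(stderr, "desc %d already exists\n", d->id);
		free(d);		/* error path still releases the descriptor */
		return -1;
	}
	ex_registry = d;		/* success: the registry now owns it */
	return 0;
}

int main(void)
{
	struct ex_desc *a = malloc(sizeof(*a));
	struct ex_desc *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->id = 1;
	b->id = 1;
	ex_process_desc(a);	/* stored in the registry */
	ex_process_desc(b);	/* rejected and freed by the callee */
	free(ex_registry);	/* the caller never frees 'a' or 'b' itself */
	return 0;
}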
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index e1c5faff3b71..220eb5ce7583 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -1441,6 +1441,9 @@ static int emac_set_pauseparam(struct net_device *dev,
struct emac_priv *priv = netdev_priv(dev);
u8 fc = 0;
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
priv->flow_control_autoneg = pause->autoneg;
if (pause->autoneg) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 51ea0caf16c1..0786816e05f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1446,14 +1446,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 650d75b73e0b..7b90ecd3a55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4089,18 +4089,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -4109,7 +4102,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4507,6 +4500,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@ -4524,10 +4518,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
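The check above compares the EST per-queue max-SDU limit against the length the frame will actually have on the wire: when VLAN tag insertion is offloaded to the MAC, VLAN_HLEN (4 bytes) is added after the driver sees the skb. A small worked example of the same arithmetic in plain C:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4	/* 802.1Q tag inserted by the hardware */

/* Returns true if the frame must be dropped for this queue. */
static bool exceeds_max_sdu(unsigned int skb_len, bool hw_vlan_insert,
			    unsigned int max_sdu)
{
	unsigned int sdu_len = skb_len;

	if (hw_vlan_insert)
		sdu_len += VLAN_HLEN;
	return max_sdu && sdu_len > max_sdu;
}

int main(void)
{
	/* 1500-byte frame, 1502-byte limit: passes untagged, but HW VLAN
	 * insertion pushes it to 1504 bytes and it is dropped.
	 */
	printf("%d\n", exceeds_max_sdu(1500, false, 1502));	/* 0 */
	printf("%d\n", exceeds_max_sdu(1500, true, 1502));	/* 1 */
	return 0;
}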
@@ -7573,11 +7572,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 97e89a604abd..3b4d4696afe9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
@@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
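Both corrected bounds above are off-by-one fixes: a gate list may use all dep hardware entries (hence > rather than >=), and an interval register that is wid bits wide can hold at most GENMASK(wid - 1, 0), i.e. 2^wid - 1, so comparing against GENMASK(wid, 0) accepted values one bit too wide. A quick numeric check, assuming a hypothetical field width of 24 bits:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(h, l) on 32-bit values. */
#define GENMASK_U32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (31 - (h))))

int main(void)
{
	unsigned int wid = 24;	/* example interval field width in bits */

	/* Largest interval a 24-bit field can encode. */
	printf("max ok      = 0x%08x\n", GENMASK_U32(wid - 1, 0)); /* 0x00ffffff */
	/* The old bound accepted one extra bit the field cannot hold. */
	printf("too lenient = 0x%08x\n", GENMASK_U32(wid, 0));     /* 0x01ffffff */
	return 0;
}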
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
index 0b6f6228ae35..ff02a79c00d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type)
value = readl(ioaddr + VLAN_INCL);
value |= VLAN_VLTI;
- value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
value &= ~VLAN_VLC;
value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
writel(value, ioaddr + VLAN_INCL);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index fa96db7c1a13..66e8b224827b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -276,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
/* The number of wireside clocks contained in the verify
* timeout counter. The default is 0x1312d0
* (10ms at 125Mhz in 1G mode).
+ * The frequency of the clock depends on the link speed
+ * and the PHY interface.
*/
- val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->qos.link_speed == SPEED_1000)
+ val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000 Mbps */
+ else if (port->qos.link_speed == SPEED_100)
+ val = 25 * HZ_PER_MHZ; /* 25 MHz at 100 Mbps */
+ else
+ val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10 Mbps */
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ val = 125 * HZ_PER_MHZ; /* 125 MHz */
+ break;
+ default:
+ netdev_err(port->ndev, "selected mode does not support IET\n");
+ return -EOPNOTSUPP;
+ }
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
val *= verify_time_ms; /* count for timeout ms */
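The conversion above is plain unit arithmetic: the wireside clock frequency in Hz, divided by 1000 to get counts per millisecond, times the verify time in ms. Checking it against the default quoted in the comment (0x1312d0 for 10 ms at 125 MHz):

#include <stdio.h>

#define HZ_PER_MHZ	1000000UL
#define MILLIHZ_PER_HZ	1000UL

int main(void)
{
	unsigned long clk = 125 * HZ_PER_MHZ;	/* 125 MHz wireside clock */
	unsigned long verify_time_ms = 10;
	unsigned long val;

	val = clk / MILLIHZ_PER_HZ;	/* counts per millisecond: 125000 */
	val *= verify_time_ms;		/* counts for the verify window */

	printf("0x%lx\n", val);		/* prints 0x1312d0 (1250000) */
	return 0;
}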
@@ -295,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
u32 ctrl, status;
int try;
- try = 20;
- do {
- /* Reset the verify state machine by writing 1
- * to LINKFAIL
- */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ try = 3;
- /* Clear MAC_LINKFAIL bit to start Verify. */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ do {
msleep(port->qos.iet.verify_time_ms);
status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
@@ -330,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
netdev_dbg(port->ndev, "MAC Merge verify error\n");
return -ENODEV;
}
- } while (try-- > 0);
+ } while (--try > 0);
netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamps independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists*/
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_move_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
- list_for_each_safe(this, next, &cpts->events) {
+ list_for_each_safe(this, next, &cpts->events_rx) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index da53eb04b0a4..3f8237c17d09 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -66,6 +66,9 @@
#define FDB_GEN_CFG1 0x60
#define SMEM_VLAN_OFFSET 8
#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+#define FDB_HASH_SIZE_MASK GENMASK(6, 3)
+#define FDB_HASH_SIZE_SHIFT 3
+#define FDB_HASH_SIZE 3
#define FDB_GEN_CFG2 0x64
#define FDB_VLAN_EN BIT(6)
@@ -463,6 +466,8 @@ void icssg_init_emac_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
@@ -484,6 +489,8 @@ void icssg_init_fw_offload_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 857820657bac..5ee13db568f0 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_channel = knav_dma_open_channel(dev,
tx_pipe->dma_chan_name, &config);
- if (IS_ERR(tx_pipe->dma_channel)) {
+ if (!tx_pipe->dma_channel) {
dev_err(dev, "failed opening tx chan(%s)\n",
tx_pipe->dma_chan_name);
- ret = PTR_ERR(tx_pipe->dma_channel);
+ ret = -EINVAL;
goto err;
}
@@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
return 0;
err:
- if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+ if (tx_pipe->dma_channel)
knav_dma_close_channel(tx_pipe->dma_channel);
tx_pipe->dma_channel = NULL;
return ret;
@@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
netcp->dma_chan_name, &config);
- if (IS_ERR(netcp->rx_channel)) {
+ if (!netcp->rx_channel) {
dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
netcp->dma_chan_name);
- ret = PTR_ERR(netcp->rx_channel);
+ ret = -EINVAL;
goto fail;
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1e2713f0c921..b37d6cfbfbe9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2427,7 +2427,8 @@ int wx_sw_init(struct wx *wx)
wx->oem_svid = pdev->subsystem_vendor;
wx->oem_ssid = pdev->subsystem_device;
wx->bus.device = PCI_SLOT(pdev->devfn);
- wx->bus.func = PCI_FUNC(pdev->devfn);
+ wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
+ rd32(wx, WX_CFG_PORT_ST));
if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
pdev->is_virtfn) {
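The new code derives the function number from the port status register rather than the PCI devfn: WX_CFG_PORT_ST_LANID is GENMASK(9, 8), so FIELD_GET() masks the register value and shifts it down by 8. As plain arithmetic, with a hypothetical register readout for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_st = 0x00000200;	/* hypothetical WX_CFG_PORT_ST value */
	uint32_t lanid_mask = 0x300;	/* GENMASK(9, 8) */

	/* FIELD_GET(mask, reg): mask the field, then shift it down to bit 0. */
	uint32_t lanid = (port_st & lanid_mask) >> 8;

	printf("lan id = %u\n", lanid);	/* prints 2 */
	return 0;
}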
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index d89b9b8a0a2c..2f8319e03182 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -97,6 +97,8 @@
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
+#define WX_CFG_PORT_ST 0x14404
+#define WX_CFG_PORT_ST_LANID GENMASK(9, 8)
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
@@ -557,8 +559,6 @@ enum WX_MSCA_CMD_value {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-#define WX_CFG_PORT_ST 0x14404
-
/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD BIT(0) /* Done */
#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index 36ccc53b1797..ef860cfc629f 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
skb->data, skb->len,
mctp_usb_out_complete, skb);
+ /* Stop the TX queue first to avoid racing with the URB completion */
+ netif_stop_queue(dev);
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc)
+ if (rc) {
+ netif_wake_queue(dev);
goto err_drop;
- else
- netif_stop_queue(dev);
+ }
return NETDEV_TX_OK;
diff --git a/drivers/net/mdio/mdio-airoha.c b/drivers/net/mdio/mdio-airoha.c
index 1dc9939c8d7d..52e7475121ea 100644
--- a/drivers/net/mdio/mdio-airoha.c
+++ b/drivers/net/mdio/mdio-airoha.c
@@ -219,6 +219,8 @@ static int airoha_mdio_probe(struct platform_device *pdev)
priv = bus->priv;
priv->base_addr = addr;
priv->regmap = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk))
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 194570443493..bb6e03a92956 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)
static void update_userdata(struct netconsole_target *nt)
{
- int complete_idx = 0, child_count = 0;
struct list_head *entry;
+ int child_count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_list_lock, flags);
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;
@@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
- break;
+ if (child_count >= MAX_EXTRADATA_ITEMS) {
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
child_count++;
item = container_of(entry, struct config_item, ci_entry);
@@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt)
* one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
- MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
- item->ci_name, udm_item->value);
+ nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
+ item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->extradata_complete,
- sizeof(nt->extradata_complete));
+ spin_unlock_irqrestore(&target_list_lock, flags);
}
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
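The rewritten loop above keeps a running offset by accumulating the scnprintf() return values in nt->userdata_length, so the final length is known without a trailing strnlen() pass, and the whole walk now runs under target_list_lock. The accumulation idiom on its own, sketched with userspace snprintf() and made-up keys (snprintf() returns the would-be length rather than the truncated one, but the idiom is the same while every entry fits):

#include <stdio.h>

#define ENTRY_LEN 32	/* stand-in for MAX_EXTRADATA_ENTRY_LEN */

int main(void)
{
	char buf[4 * ENTRY_LEN];
	size_t len = 0;
	const char *keys[] = { "rack", "rev" };
	const char *vals[] = { "12", "b" };

	for (int i = 0; i < 2; i++) {
		/* Each write starts where the previous one ended; the
		 * running total is the final string length, so no extra
		 * length scan is needed afterwards.
		 */
		len += snprintf(buf + len, ENTRY_LEN, " %s=%s\n",
				keys[i], vals[i]);
	}
	printf("len=%zu\n%s", len, buf);	/* len=16 */
	return 0;
}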
@@ -931,6 +936,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
ret = strscpy(udm->value, buf, sizeof(udm->value));
@@ -944,6 +950,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -969,6 +976,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
if (msgid_enabled == curr)
@@ -989,6 +997,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1003,6 +1012,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
if (release_enabled == curr)
@@ -1023,6 +1033,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1037,6 +1048,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
if (taskname_enabled == curr)
@@ -1057,6 +1069,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1072,6 +1085,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
if (cpu_nr_enabled == curr)
@@ -1100,6 +1114,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 289f62c5d2c7..0d7f30360d87 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -560,16 +560,34 @@ static void ovpn_tcp_close(struct sock *sk, long timeout)
static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
+ struct sk_buff_head *queue = &sock->sk->sk_receive_queue;
struct ovpn_socket *ovpn_sock;
+ struct ovpn_peer *peer = NULL;
+ __poll_t mask;
rcu_read_lock();
ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
- if (ovpn_sock && ovpn_sock->peer &&
- !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
+ /* If we landed in this callback, we expect a meaningful
+ * state: the ovpn_socket lifecycle prevents reaching this
+ * point otherwise.
+ */
+ if (WARN(!ovpn_sock || !ovpn_sock->peer,
+ "ovpn: null state in ovpn_tcp_poll!")) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ovpn_peer_hold(ovpn_sock->peer)) {
+ peer = ovpn_sock->peer;
+ queue = &peer->tcp.user_queue;
+ }
rcu_read_unlock();
+ mask = datagram_poll_queue(file, sock, wait, queue);
+
+ if (peer)
+ ovpn_peer_put(peer);
+
return mask;
}
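The reworked poll handler is an instance of a common RCU pattern: dereference the peer inside rcu_read_lock(), take a reference before leaving the critical section, use the object afterwards, then drop the reference. A condensed sketch of that pattern with a hypothetical object, not the ovpn peer/socket types:

/* Sketch only: hypothetical object and helpers. */
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct ex_obj {
	refcount_t refs;
	/* per-object state (e.g. a receive queue) would live here */
};

static struct ex_obj __rcu *ex_obj_ptr;

static void ex_use_obj(void)
{
	struct ex_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(ex_obj_ptr);
	/* Only use the object after rcu_read_unlock() if a reference was
	 * taken while RCU still guaranteed it could not be freed.
	 */
	if (obj && !refcount_inc_not_zero(&obj->refs))
		obj = NULL;
	rcu_read_unlock();

	if (!obj)
		return;

	/* ... safe to touch obj here, outside the RCU section ... */

	if (refcount_dec_and_test(&obj->refs))
		kfree(obj);	/* last reference; pointer must already be unpublished */
}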
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index deeefb962566..36a0c1b7f59c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
+ /* Although the DP83867 reports EEE capability through the
+ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
+ * is not actually implemented in hardware.
+ */
+ phy_disable_eee(phydev);
+
if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index a2cd1cc35cde..1f381d7b13ff 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -84,7 +84,7 @@
#define DP83869_CLK_DELAY_DEF 7
/* STRAP_STS1 bits */
-#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
+#define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
#define DP83869_STRAP_MIRROR_ENABLED BIT(12)
@@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
if (val < 0)
return val;
- dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+ dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);
return 0;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index cad6ed3aa10b..4354241137d5 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -73,8 +73,11 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
return err;
err = mdiobus_register_reset(mdiodev);
- if (err)
+ if (err) {
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
return err;
+ }
/* Assert the reset signal */
mdio_device_reset(mdiodev, 1);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 79ce3eb6752b..01c87c9b7702 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -466,6 +466,12 @@ struct lan8842_priv {
u16 rev;
};
+struct lanphy_reg_data {
+ int page;
+ u16 addr;
+ u16 val;
+};
+
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
.cable_diag_reg = LAN8814_CABLE_DIAG,
@@ -2836,6 +2842,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN8814_PAGE_PCS_DIGITAL 2
/**
+ * LAN8814_PAGE_EEE - Selects Extended Page 3.
+ *
+ * This page contains the EEE registers.
+ */
+#define LAN8814_PAGE_EEE 3
+
+/**
* LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
*
* This page contains device-common registers that affect the entire chip.
@@ -2854,6 +2867,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN8814_PAGE_PORT_REGS 5
/**
+ * LAN8814_PAGE_POWER_REGS - Selects Extended Page 28.
+ *
+ * This page contains analog control registers and power mode registers.
+ */
+#define LAN8814_PAGE_POWER_REGS 28
+
+/**
* LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
*
* This page appears to hold fundamental system or global controls. In the
@@ -4262,6 +4282,8 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
+ shared->phydev = phydev;
+
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -4317,8 +4339,6 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
phydev_dbg(phydev, "successfully registered ptp clock\n");
- shared->phydev = phydev;
-
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
@@ -4360,12 +4380,6 @@ static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
- /* Reset the PHY */
- lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
- LAN8814_QSGMII_SOFT_RESET,
- LAN8814_QSGMII_SOFT_RESET_BIT,
- LAN8814_QSGMII_SOFT_RESET_BIT);
-
/* Disable ANEG with QSGMII PCS Host side */
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
@@ -4451,6 +4465,12 @@ static int lan8814_probe(struct phy_device *phydev)
addr, sizeof(struct lan8814_shared_priv));
if (phy_package_init_once(phydev)) {
+ /* Reset the PHY */
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+
err = lan8814_release_coma_mode(phydev);
if (err)
return err;
@@ -5884,6 +5904,144 @@ static int lan8842_probe(struct phy_device *phydev)
return 0;
}
+#define LAN8814_POWER_MGMT_MODE_3_ANEG_MDI 0x13
+#define LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX 0x14
+#define LAN8814_POWER_MGMT_MODE_5_10BT_MDI 0x15
+#define LAN8814_POWER_MGMT_MODE_6_10BT_MDIX 0x16
+#define LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN 0x17
+#define LAN8814_POWER_MGMT_MODE_8_100BT_MDI 0x18
+#define LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX 0x19
+#define LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX 0x1a
+#define LAN8814_POWER_MGMT_MODE_11_100BT_MDIX 0x1b
+#define LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX 0x1c
+#define LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX 0x1d
+#define LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX 0x1e
+
+#define LAN8814_POWER_MGMT_DLLPD_D BIT(0)
+#define LAN8814_POWER_MGMT_ADCPD_D BIT(1)
+#define LAN8814_POWER_MGMT_PGAPD_D BIT(2)
+#define LAN8814_POWER_MGMT_TXPD_D BIT(3)
+#define LAN8814_POWER_MGMT_DLLPD_C BIT(4)
+#define LAN8814_POWER_MGMT_ADCPD_C BIT(5)
+#define LAN8814_POWER_MGMT_PGAPD_C BIT(6)
+#define LAN8814_POWER_MGMT_TXPD_C BIT(7)
+#define LAN8814_POWER_MGMT_DLLPD_B BIT(8)
+#define LAN8814_POWER_MGMT_ADCPD_B BIT(9)
+#define LAN8814_POWER_MGMT_PGAPD_B BIT(10)
+#define LAN8814_POWER_MGMT_TXPD_B BIT(11)
+#define LAN8814_POWER_MGMT_DLLPD_A BIT(12)
+#define LAN8814_POWER_MGMT_ADCPD_A BIT(13)
+#define LAN8814_POWER_MGMT_PGAPD_A BIT(14)
+#define LAN8814_POWER_MGMT_TXPD_A BIT(15)
+
+#define LAN8814_POWER_MGMT_C_D (LAN8814_POWER_MGMT_DLLPD_D | \
+ LAN8814_POWER_MGMT_ADCPD_D | \
+ LAN8814_POWER_MGMT_PGAPD_D | \
+ LAN8814_POWER_MGMT_DLLPD_C | \
+ LAN8814_POWER_MGMT_ADCPD_C | \
+ LAN8814_POWER_MGMT_PGAPD_C)
+
+#define LAN8814_POWER_MGMT_B_C_D (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B)
+
+#define LAN8814_POWER_MGMT_VAL1 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL2 LAN8814_POWER_MGMT_C_D
+
+#define LAN8814_POWER_MGMT_VAL3 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL4 (LAN8814_POWER_MGMT_B_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL5 LAN8814_POWER_MGMT_B_C_D
+
+#define LAN8814_EEE_WAKE_TX_TIMER 0x0e
+#define LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL 0x1f
+
+static const struct lanphy_reg_data short_center_tap_errata[] = {
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_3_ANEG_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_5_10BT_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_6_10BT_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN,
+ LAN8814_POWER_MGMT_VAL2 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_8_100BT_MDI,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_11_100BT_MDIX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+};
+
+static const struct lanphy_reg_data waketx_timer_errata[] = {
+ { LAN8814_PAGE_EEE,
+ LAN8814_EEE_WAKE_TX_TIMER,
+ LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL },
+};
+
+static int lanphy_write_reg_data(struct phy_device *phydev,
+ const struct lanphy_reg_data *data,
+ size_t num)
+{
+ int ret = 0;
+
+ while (num--) {
+ ret = lanphy_write_page_reg(phydev, data->page, data->addr,
+ data->val);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int lan8842_erratas(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lanphy_write_reg_data(phydev, short_center_tap_errata,
+ ARRAY_SIZE(short_center_tap_errata));
+ if (ret)
+ return ret;
+
+ return lanphy_write_reg_data(phydev, waketx_timer_errata,
+ ARRAY_SIZE(waketx_timer_errata));
+}
+
static int lan8842_config_init(struct phy_device *phydev)
{
int ret;
@@ -5896,6 +6054,11 @@ static int lan8842_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ /* Apply the errata for this device */
+ ret = lan8842_erratas(phydev);
+ if (ret < 0)
+ return ret;
+
/* Even if the GPIOs are set to control the LEDs the behaviour of the
* LEDs is wrong, they are not blinking when there is traffic.
* To fix this it is required to set extended LED mode
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index a724b21b4fe7..16a347084293 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -154,7 +154,7 @@
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
#define RTL_8221B_VB_CG 0x001cc849
-#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8221B_VM_CG 0x001cc84a
#define RTL_8251B 0x001cc862
#define RTL_8261C 0x001cc890
@@ -1523,16 +1523,16 @@ static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
}
static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
@@ -1879,7 +1879,7 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.probe = rtl822x_probe,
.get_features = rtl822x_get_features,
@@ -1892,8 +1892,8 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
- .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
.probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 85bd5d845409..232bbd79a4de 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
int i;
unsigned long gpio_bits = dev->driver_info->data;
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
@@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->driver_priv = priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
@@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN] = {0};
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 11352d85475a..3a4985b582cb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -192,6 +192,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (!skbn)
return 0;
+ /* Raw IP packets don't have a MAC header, but other subsystems
+ * (like xfrm) may still access MAC header offsets, so they must
+ * be initialized.
+ */
+ skb_reset_mac_header(skbn);
+
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 92add3daadbb..278e6cb6f4d9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -685,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
rtl8150_t *dev = netdev_priv(netdev);
int count, res;
+ /* pad the frame and ensure a terminating short USB packet, datasheet 9.2.3 */
+ count = max(skb->len, ETH_ZLEN);
+ if (count % 64 == 0)
+ count++;
+ if (skb_padto(skb, count)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
netif_stop_queue(netdev);
- count = (skb->len < 60) ? 60 : skb->len;
- count = (count & 0x3f) ? count : count + 1;
dev->tx_skb = skb;
usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
skb->data, count, write_bulk_callback, dev);
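The new length computation does the same two things as the removed bit-twiddling, just more readably: pad short frames up to ETH_ZLEN (60 bytes), and avoid a transfer length that is an exact multiple of the 64-byte bulk packet size, since per the datasheet reference a short final packet is what terminates the transfer; adding one byte guarantees one. Worked through for a few lengths:

#include <stdio.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length without FCS */

static unsigned int tx_len(unsigned int skb_len)
{
	unsigned int count = skb_len < ETH_ZLEN ? ETH_ZLEN : skb_len;

	if (count % 64 == 0)	/* full bulk packet: add a pad byte */
		count++;
	return count;
}

int main(void)
{
	printf("%u\n", tx_len(42));	/* 60: runt padded to ETH_ZLEN */
	printf("%u\n", tx_len(64));	/* 65: avoid an exact 64-byte multiple */
	printf("%u\n", tx_len(100));	/* 100: unchanged */
	return 0;
}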
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index bf01f2728531..697cd9d866d3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1659,6 +1659,8 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
+ cancel_work_sync(&dev->kevent);
+
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
kfree(urb->sg);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a757cbcab87f..0369dda5ed60 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -910,17 +910,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
goto ok;
}
- /*
- * Verify that we can indeed put this data into a skb.
- * This is here to handle cases when the device erroneously
- * tries to receive more than is possible. This is usually
- * the case of a broken device.
- */
- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
- net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
- dev_kfree_skb(skb);
- return NULL;
- }
BUG_ON(offset >= PAGE_SIZE);
while (len) {
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
@@ -1379,9 +1368,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
- /* TODO: support multi buffer. */
- if (prog && num_buf == 1)
- ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ if (prog) {
+ /* TODO: support multi buffer. */
+ if (num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
+ stats);
+ else
+ ret = XDP_ABORTED;
+ }
rcu_read_unlock();
switch (ret) {
@@ -2107,9 +2101,19 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+ struct sk_buff *skb;
+
+ /* Make sure that len does not exceed the size allocated in
+ * add_recvbuf_big.
+ */
+ if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
+ pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
+ dev->name, len,
+ (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
+ goto err;
+ }
+ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
@@ -2534,6 +2538,13 @@ err_buf:
return NULL;
}
+static inline u32
+virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
+{
+ return __le16_to_cpu(hdr_hash->hash_value_lo) |
+ (__le16_to_cpu(hdr_hash->hash_value_hi) << 16);
+}
+
static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
struct sk_buff *skb)
{
@@ -2560,7 +2571,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
+ skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type);
}
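virtio_net_hash_value() assembles the 32-bit hash from two little-endian 16-bit halves (hash_value_lo and hash_value_hi) instead of reading a single __le32 field. The byte-level arithmetic, shown in plain userspace C with an example value:

#include <stdint.h>
#include <stdio.h>

/* Combine two little-endian 16-bit halves into a host-order 32-bit hash. */
static uint32_t hash_value(const uint8_t lo[2], const uint8_t hi[2])
{
	uint16_t lo16 = (uint16_t)lo[0] | ((uint16_t)lo[1] << 8);
	uint16_t hi16 = (uint16_t)hi[0] | ((uint16_t)hi[1] << 8);

	return (uint32_t)lo16 | ((uint32_t)hi16 << 16);
}

int main(void)
{
	/* Bytes as they would appear on the wire for hash 0x12345678:
	 * low half 0x5678 (LE: 78 56), high half 0x1234 (LE: 34 12).
	 */
	const uint8_t lo[2] = { 0x78, 0x56 };
	const uint8_t hi[2] = { 0x34, 0x12 };

	printf("0x%08x\n", hash_value(lo, hi));	/* 0x12345678 */
	return 0;
}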
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
@@ -2620,22 +2631,28 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
- /* 1. Save the flags early, as the XDP program might overwrite them.
+ /* About the flags below:
+ * 1. Save the flags early, as the XDP program might overwrite them.
* These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
* stay valid after XDP processing.
* 2. XDP doesn't work with partially checksummed packets (refer to
* virtnet_xdp_set()), so packets marked as
* VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
*/
- flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
- if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs) {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
- else if (vi->big_packets)
+ } else if (vi->big_packets) {
+ void *p = page_address((struct page *)buf);
+
+ flags = ((struct virtio_net_common_hdr *)p)->hdr.flags;
skb = receive_big(dev, vi, rq, buf, len, stats);
- else
+ } else {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
+ }
if (unlikely(!skb))
return;
@@ -3306,6 +3323,10 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+ /* Make sure it's safe to cast between formats */
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr));
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr));
+
can_push = vi->any_header_sg &&
!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
@@ -6745,7 +6766,7 @@ static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
hash_report = VIRTIO_NET_HASH_REPORT_NONE;
*rss_type = virtnet_xdp_rss_type[hash_report];
- *hash = __le32_to_cpu(hdr_hash->hash_value);
+ *hash = virtio_net_hash_value(hdr_hash);
return 0;
}
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index c5501826db1e..c058cc79137d 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -648,7 +648,8 @@ static int pef2256_add_audio_devices(struct pef2256 *pef2256)
audio_devs[i].id = i;
}
- ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, audio_devs, count,
+ NULL, 0, NULL);
kfree(audio_devs);
return ret;
}
@@ -822,8 +823,8 @@ static int pef2256_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pef2256);
- ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs,
- ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, pef2256_devs,
+ ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
if (ret) {
dev_err(pef2256->dev, "add devices failed (%d)\n", ret);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e595b0979a56..b4aad6604d6d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1764,32 +1764,33 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
- /* Sometimes the PCI HIF doesn't receive interrupt
- * for the service ready message even if the buffer
- * was completed. PCIe sniffer shows that it's
- * because the corresponding CE ring doesn't fires
- * it. Workaround here by polling CE rings. Since
- * the message could arrive at any time, continue
- * polling until timeout.
- */
- do {
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that this is
+ * because the corresponding CE ring doesn't fire
+ * it. Work around this by polling the CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
- /* The 100 ms granularity is a tradeoff considering scheduler
- * overhead and response latency
- */
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- msecs_to_jiffies(100));
- if (time_left)
- return 0;
- } while (time_before(jiffies, timeout));
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
- ath10k_warn(ar, "failed to receive service ready completion\n");
- return -ETIMEDOUT;
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
+ return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
@@ -1937,6 +1938,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
+ dev_kfree_skb_any(skb);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 2810752260f2..812686173ac8 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
static const struct dmi_system_id ath11k_pm_quirk_table[] = {
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* X13 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T16 G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P16s G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
},
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 106e2530b64e..0e41b5a91d66 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
@@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
!arvif->num_stations);
- if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
@@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
- if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
arvif->reinstall_group_keys = true;
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 0491e3fd6b5e..e3b444333dee 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -5961,6 +5961,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
!tx_compl_param->status) {
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 1d7b60aa5cb0..db351c922018 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -4064,68 +4064,12 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
-static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- struct ieee80211_vif *vif = arvif->ahvif->vif;
- struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
- enum wmi_sta_powersave_param param;
- struct ieee80211_bss_conf *info;
- enum wmi_sta_ps_mode psmode;
- int ret;
- int timeout;
- bool enable_ps;
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- enable_ps = arvif->ahvif->ps;
- if (enable_ps) {
- psmode = WMI_STA_PS_MODE_ENABLED;
- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
-
- timeout = conf->dynamic_ps_timeout;
- if (timeout == 0) {
- info = ath12k_mac_get_link_bss_conf(arvif);
- if (!info) {
- ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return;
- }
-
- /* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
- }
-
- ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
- timeout);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
- arvif->vdev_id, ret);
- return;
- }
- } else {
- psmode = WMI_STA_PS_MODE_DISABLED;
- }
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
- arvif->vdev_id, psmode ? "enable" : "disable");
-
- ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
- if (ret)
- ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
- psmode, arvif->vdev_id, ret);
-}
-
static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
- struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4189,24 +4133,61 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
+}
- if (changed & BSS_CHANGED_PS) {
- links = ahvif->links_map;
- vif_cfg = &vif->cfg;
+static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- if (!arvif || !arvif->ar)
- continue;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ar = arvif->ar;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
- if (ar->ab->hw_params->supports_sta_ps) {
- ahvif->ps = vif_cfg->ps;
- ath12k_mac_vif_setup_ps(arvif);
+ enable_ps = arvif->ahvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
}
+
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
}
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
}
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
@@ -4228,6 +4209,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -4514,6 +4496,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
+
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ ahvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
}
static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
@@ -8290,23 +8278,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
wake_up(&ar->txmgmt_empty_waitq);
}
-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
{
- struct sk_buff *msdu = skb;
+ struct sk_buff *msdu;
struct ieee80211_tx_info *info;
- struct ath12k *ar = ctx;
- struct ath12k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
- ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath12k *ar = ctx;
+
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@@ -8315,17 +8312,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct sk_buff *msdu = skb;
struct ath12k *ar = skb_cb->ar;
- struct ath12k_base *ab = ar->ab;
- if (skb_cb->vif == vif) {
- spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
- spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ if (skb_cb->vif == vif)
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8afaffe31031..bb96b87b2a6e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie, le16_to_cpu(action_frame->len),
le32_to_cpu(af_params->channel));
- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
- af_params);
+ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 0dc9d28cd77b..e1752a513c73 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
/**
* brcmf_p2p_tx_action_frame() - send action frame over fil.
*
+ * @ifp: interface to transmit on.
* @p2p: p2p info struct for vif.
* @af_params: action frame data/info.
*
@@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
* The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
* frame is transmitted.
*/
-static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp,
+ struct brcmf_p2p_info *p2p,
struct brcmf_fil_af_params_le *af_params)
{
struct brcmf_pub *drvr = p2p->cfg->pub;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
- /* check if it is a p2p_presence response */
- p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
- if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
- vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
- else
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
bphy_err(drvr, " sending action frame has failed\n");
@@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,
/**
* brcmf_p2p_send_action_frame() - send action frame.
*
- * @cfg: driver private data for cfg80211 interface.
- * @ndev: net device to transmit on.
+ * @ifp: interface to transmit on.
* @af_params: configuration data for action frame.
*/
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_config_af_params config_af_params;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
@@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (af_params->channel)
msleep(P2P_AF_RETRY_DELAY_TIME);
- ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);
tx_retry++;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
@@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx);
- init_completion(&p2p->send_af_done);
INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
init_completion(&p2p->afx_hdl.act_frm_scan);
init_completion(&p2p->wait_next_af);
@@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
pri_ifp = brcmf_get_ifp(cfg->pub, 0);
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+ init_completion(&p2p->send_af_done);
+
if (p2pdev_forced) {
err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
if (IS_ERR(err_ptr)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index d2ecee565bf2..d3137ebd7158 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data);
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params);
bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 738f80fe0c50..f6f52d297a72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
bool is_deflink = link == &mld_vif->deflink;
+ u8 fw_id = link->fw_id;
if (WARN_ON(!link || link->active))
return;
@@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
- if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+ if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links))
return;
- RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL);
}
void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
@@ -707,18 +708,13 @@ static int
iwl_mld_get_chan_load_from_element(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *vif = link_conf->vif;
const struct cfg80211_bss_ies *ies;
const struct element *bss_load_elem = NULL;
const struct ieee80211_bss_load_elem *bss_load;
guard(rcu)();
- if (ieee80211_vif_link_active(vif, link_conf->link_id))
- ies = rcu_dereference(link_conf->bss->beacon_ies);
- else
- ies = rcu_dereference(link_conf->bss->ies);
-
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
if (ies)
bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
ies->data, ies->len);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9c9e0e1c6e1d..867807abde66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -938,19 +938,12 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
+ u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
- u16 flags, cck_flag;
-
- if (is_new_rate) {
- flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
- cck_flag = IWL_MAC_BEACON_CCK;
- } else {
- cck_flag = IWL_MAC_BEACON_CCK_V1;
- flags = iwl_fw_rate_idx_to_plcp(rate_idx);
- }
if (rate_idx <= IWL_LAST_CCK_RATE)
- flags |= cck_flag;
+ flags |= is_new_rate ? IWL_MAC_BEACON_CCK
+ : IWL_MAC_BEACON_CCK_V1;
return flags;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 0c9c2492d8a7..0b12ee8ad618 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -463,7 +463,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
if (!aux_roc_te) /* Not an Aux ROC time event */
return -EINVAL;
- iwl_mvm_te_check_trigger(mvm, notif, te_data);
+ iwl_mvm_te_check_trigger(mvm, notif, aux_roc_te);
IWL_DEBUG_TE(mvm,
"Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
@@ -475,14 +475,14 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
ieee80211_remain_on_channel_expired(mvm->hw);
iwl_mvm_roc_finished(mvm); /* flush aux queue */
- list_del(&te_data->list); /* remove from list */
- te_data->running = false;
- te_data->vif = NULL;
- te_data->uid = 0;
- te_data->id = TE_MAX;
+ list_del(&aux_roc_te->list); /* remove from list */
+ aux_roc_te->running = false;
+ aux_roc_te->vif = NULL;
+ aux_roc_te->uid = 0;
+ aux_roc_te->id = TE_MAX;
} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
- te_data->running = true;
+ aux_roc_te->running = true;
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
} else {
IWL_DEBUG_TE(mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 22602c32faa5..fa995e235d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -159,9 +159,15 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- return (rate_idx >= IWL_FIRST_OFDM_RATE ?
- rate_idx - IWL_FIRST_OFDM_RATE :
- rate_idx);
+ if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
+ /* In the new rate format, legacy rates are indexed:
+ * 0 - 3 for CCK and 0 - 7 for OFDM.
+ */
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
+
+ return iwl_fw_rate_idx_to_plcp(rate_idx);
}
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 891e125ad30b..54d6d00ecdf1 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -2966,6 +2966,51 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
/*
* CMD_SET_BEACON.
*/
+
+static bool mwl8k_beacon_has_ds_params(const u8 *buf, int len)
+{
+ const struct ieee80211_mgmt *mgmt = (const void *)buf;
+ int ies_len;
+
+ if (len <= offsetof(struct ieee80211_mgmt, u.beacon.variable))
+ return false;
+
+ ies_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ return cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable,
+ ies_len) != NULL;
+}
+
+static void mwl8k_beacon_copy_inject_ds_params(struct ieee80211_hw *hw,
+ u8 *buf_dst, const u8 *buf_src,
+ int src_len)
+{
+ const struct ieee80211_mgmt *mgmt = (const void *)buf_src;
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ };
+ const u8 *ies;
+ int hdr_len, left, offs, pos;
+
+ ies = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ offs = ieee80211_ie_split(ies, src_len - hdr_len, before_ds_params,
+ ARRAY_SIZE(before_ds_params), 0);
+
+ pos = hdr_len + offs;
+ left = src_len - pos;
+
+ memcpy(buf_dst, buf_src, pos);
+
+ /* Inject a DSSS Parameter Set after SSID + Supp Rates */
+ buf_dst[pos + 0] = WLAN_EID_DS_PARAMS;
+ buf_dst[pos + 1] = 1;
+ buf_dst[pos + 2] = hw->conf.chandef.chan->hw_value;
+
+ memcpy(buf_dst + pos + 3, buf_src + pos, left);
+}
struct mwl8k_cmd_set_beacon {
struct mwl8k_cmd_pkt_hdr header;
__le16 beacon_len;
@@ -2975,17 +3020,33 @@ struct mwl8k_cmd_set_beacon {
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *beacon, int len)
{
+ bool ds_params_present = mwl8k_beacon_has_ds_params(beacon, len);
struct mwl8k_cmd_set_beacon *cmd;
- int rc;
+ int rc, final_len = len;
- cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (!ds_params_present) {
+ /*
+ * mwl8k firmware requires a DS Params IE with the current
+ * channel in AP beacons. If mac80211/hostapd does not
+ * include it, inject one here. IE ID + length + channel
+ * number = 3 bytes.
+ */
+ final_len += 3;
+ }
+
+ cmd = kzalloc(sizeof(*cmd) + final_len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
- cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
- cmd->beacon_len = cpu_to_le16(len);
- memcpy(cmd->beacon, beacon, len);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + final_len);
+ cmd->beacon_len = cpu_to_le16(final_len);
+
+ if (ds_params_present)
+ memcpy(cmd->beacon, beacon, len);
+ else
+ mwl8k_beacon_copy_inject_ds_params(hw, cmd->beacon, beacon,
+ len);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 9f856042a67a..5903d82e1ab1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -2003,8 +2003,14 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_sta *sta = control->sta;
struct ieee80211_bss_conf *bss_conf;
+ /* This can happen in case of monitor injection */
+ if (!vif) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
if (link != IEEE80211_LINK_UNSPECIFIED) {
- bss_conf = rcu_dereference(txi->control.vif->link_conf[link]);
+ bss_conf = rcu_dereference(vif->link_conf[link]);
if (sta)
link_sta = rcu_dereference(sta->link[link]);
} else {
@@ -2065,13 +2071,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (txi->control.vif)
- hwsim_check_magic(txi->control.vif);
+ if (vif)
+ hwsim_check_magic(vif);
if (control->sta)
hwsim_check_sta_magic(control->sta);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
- ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
+ ieee80211_get_tx_rates(vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -6698,14 +6704,15 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
-static void remove_user_radios(u32 portid)
+static void remove_user_radios(u32 portid, int netgroup)
{
struct mac80211_hwsim_data *entry, *tmp;
LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
- if (entry->destroy_on_close && entry->portid == portid) {
+ if (entry->destroy_on_close && entry->portid == portid &&
+ entry->netgroup == netgroup) {
list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
@@ -6730,7 +6737,7 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
- remove_user_radios(notify->portid);
+ remove_user_radios(notify->portid, hwsim_net_get_netgroup(notify->net));
if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 2faa0de2a36e..8ee15a15f4ca 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -791,6 +791,7 @@ error:
if (urbs) {
for (i = 0; i < RX_URBS_COUNT; i++)
free_rx_urb(urbs[i]);
+ kfree(urbs);
}
return r;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c916176bd9f0..72fb675a696f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1042,7 +1042,7 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
+static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int entries = req->nr_integrity_segments;
@@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
* descriptor provides an explicit length, so we're relying on that
* mechanism to catch any misunderstandings between the application and
* device.
+ *
+ * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
+ * leverages this routine when that happens.
*/
- if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
+ (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
iod->cmd.common.metadata = cpu_to_le64(iter.addr);
iod->meta_total_len = iter.len;
iod->meta_dma = iter.addr;
@@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req);
+ if (is_pci_p2pdma_page(bv.bv_page))
+ return nvme_pci_setup_meta_iter(req);
+
iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
@@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req)
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
nvme_pci_metadata_use_sgls(req))
- return nvme_pci_setup_meta_sgls(req);
+ return nvme_pci_setup_meta_iter(req);
return nvme_pci_setup_meta_mptr(req);
}
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index b340380f3892..ceba21684e82 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
const char *hash_name;
u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key;
- u8 buf[4];
+ u8 buf[4], sc_c = ctrl->concat ? 1 : 0;
int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id);
@@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
- memset(buf, 0, 4);
+ *buf = sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "HostHost", 8);
if (ret)
goto out;
+ memset(buf, 0, 4);
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret)
goto out;
diff --git a/drivers/nvmem/rcar-efuse.c b/drivers/nvmem/rcar-efuse.c
index f24bdb9cb5a7..d9a96a1d59c8 100644
--- a/drivers/nvmem/rcar-efuse.c
+++ b/drivers/nvmem/rcar-efuse.c
@@ -127,6 +127,7 @@ static const struct of_device_id rcar_fuse_match[] = {
{ .compatible = "renesas,r8a779h0-otp", .data = &rcar_fuse_v4m },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, rcar_fuse_match);
static struct platform_driver rcar_fuse_driver = {
.probe = rcar_fuse_probe,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 65c3c23255b7..1cd93549d093 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -671,6 +671,36 @@ err:
}
}
+static int of_check_msi_parent(struct device_node *dev_node, struct device_node **msi_node)
+{
+ struct of_phandle_args msi_spec;
+ int ret;
+
+ /*
+ * An msi-parent phandle with a missing or == 0 #msi-cells
+ * property identifies a 1:1 ID translation mapping.
+ *
+ * Set the msi controller node if the firmware matches this
+ * condition.
+ */
+ ret = of_parse_phandle_with_optional_args(dev_node, "msi-parent", "#msi-cells",
+ 0, &msi_spec);
+ if (ret)
+ return ret;
+
+ if ((*msi_node && *msi_node != msi_spec.np) || msi_spec.args_count != 0)
+ ret = -EINVAL;
+
+ if (!ret) {
+ /* Return with a node reference held */
+ *msi_node = msi_spec.np;
+ return 0;
+ }
+ of_node_put(msi_spec.np);
+
+ return ret;
+}
+
/**
* of_msi_xlate - map a MSI ID and find relevant MSI controller node
* @dev: device for which the mapping is to be done.
@@ -678,7 +708,7 @@ err:
* @id_in: Device ID.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in.
+ * or "msi-parent" property. If found, apply the mapping to @id_in.
* If @msi_np points to a non-NULL device node pointer, only entries targeting
* that node will be matched; if it points to a NULL value, it will receive the
* device node of the first matching target phandle, with a reference held.
@@ -692,14 +722,18 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
/*
* Walk up the device parent links looking for one with a
- * "msi-map" property.
+ * "msi-map" or an "msi-parent" property.
*/
- for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
+ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
"msi-map-mask", msi_np, &id_out))
break;
+ if (!of_check_msi_parent(parent_dev->of_node, msi_np))
+ break;
+ }
return id_out;
}
+EXPORT_SYMBOL_GPL(of_msi_xlate);
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
@@ -741,8 +775,10 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) {
d = irq_find_matching_host(it.node, token);
- if (d)
+ if (d) {
+ of_node_put(it.node);
return d;
+ }
}
return NULL;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7065a8e5f9b1..f94f5d384362 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -306,6 +306,7 @@ config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
depends on (PCI && !S390)
+ select SCREEN_INFO if X86
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1eac012a8226..c0e1194a936b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -255,7 +255,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
u16 flags, mme;
u8 cap;
- cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 20c9333bcb1c..e92513c5bda5 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -23,6 +23,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;
#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
@@ -471,9 +472,6 @@ static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *re
if (IS_ERR(pp->cfg))
return PTR_ERR(pp->cfg);
- pci->dbi_base = pp->cfg->win;
- pci->dbi_phys_addr = res->start;
-
return 0;
}
@@ -529,7 +527,7 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
if (ret)
return ret;
- pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ pp->bridge->ops = &dw_pcie_ecam_ops;
pp->bridge->sysdata = pp->cfg;
pp->cfg->priv = pp;
} else {
@@ -842,12 +840,34 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+
static struct pci_ops dw_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 805edbbfe7eb..c48a20602d7f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -55,7 +55,6 @@
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
-#define PARF_SLV_DBI_ELBI 0x1b4
#define PARF_INT_ALL_STATUS 0x224
#define PARF_INT_ALL_CLEAR 0x228
#define PARF_INT_ALL_MASK 0x22c
@@ -65,16 +64,6 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
-#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
-#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
-#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
-#define PARF_ECAM_BASE 0x380
-#define PARF_ECAM_BASE_HI 0x384
#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -98,7 +87,6 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
-#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -146,9 +134,6 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
-/* PARF_SLV_DBI_ELBI */
-#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0)
-
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
@@ -262,6 +247,7 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
+ void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
@@ -326,47 +312,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
qcom_perst_assert(pcie, false);
}
-static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u64 addr, addr_end;
- u32 val;
-
- writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
- writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
-
- /*
- * The only device on the root bus is a single Root Port. If we try to
- * access any devices other than Device/Function 00.0 on Bus 0, the TLP
- * will go outside of the controller to the PCI bus. But with CFG Shift
- * Feature (ECAM) enabled in iATU, there is no guarantee that the
- * response is going to be all F's. Hence, to make sure that the
- * requester gets all F's response for accesses other than the Root
- * Port, configure iATU to block the transactions starting from
- * function 1 of the root bus to the end of the root bus (i.e., from
- * dbi_base + 4KB to dbi_base + 1MB).
- */
- addr = pci->dbi_phys_addr + SZ_4K;
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
-
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
-
- addr_end = pci->dbi_phys_addr + SZ_1M - 1;
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
-
- val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
- val |= PCIE_ECAM_BLOCKER_EN;
- writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
-}
-
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1094,6 +1039,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
return 0;
}
+static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
+{
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ return 0;
+}
+
+static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+
+ pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
+}
+
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -1320,7 +1284,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u16 offset;
int ret;
qcom_ep_reset_assert(pcie);
@@ -1329,17 +1292,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- if (pp->ecam_enabled) {
- /*
- * Override ELBI when ECAM is enabled, as when ECAM is enabled,
- * ELBI moves under the 'config' space.
- */
- offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
- pci->elbi_base = pci->dbi_base + offset;
-
- qcom_pci_config_ecam(pp);
- }
-
ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1380,9 +1332,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
pcie->cfg->ops->deinit(pcie);
}
+static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (pcie->cfg->ops->host_post_init)
+ pcie->cfg->ops->host_post_init(pcie);
+}
+
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.init = qcom_pcie_host_init,
.deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1444,6 +1406,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
.config_sid = qcom_pcie_config_sid_1_9_0,
@@ -1454,6 +1417,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 1bd5bf4a6097..b4b62b9ccc45 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -192,6 +192,12 @@ static void vmd_pci_msi_enable(struct irq_data *data)
data->chip->irq_unmask(data);
}
+static unsigned int vmd_pci_msi_startup(struct irq_data *data)
+{
+ vmd_pci_msi_enable(data);
+ return 0;
+}
+
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
@@ -210,6 +216,11 @@ static void vmd_pci_msi_disable(struct irq_data *data)
vmd_irq_disable(data->parent_data);
}
+static void vmd_pci_msi_shutdown(struct irq_data *data)
+{
+ vmd_pci_msi_disable(data);
+}
+
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
.irq_compose_msi_msg = vmd_compose_msi_msg,
@@ -309,6 +320,8 @@ static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
+ info->chip->irq_startup = vmd_pci_msi_startup;
+ info->chip->irq_shutdown = vmd_pci_msi_shutdown;
info->chip->irq_enable = vmd_pci_msi_enable;
info->chip->irq_disable = vmd_pci_msi_disable;
return true;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4492b809094b..36f8c0985430 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -958,6 +958,7 @@ void pci_save_aspm_l1ss_state(struct pci_dev *dev);
void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#ifdef CONFIG_PCIEASPM
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
@@ -965,6 +966,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
void pci_configure_ltr(struct pci_dev *pdev);
void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
+static inline void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap) { }
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7cc8281e7011..cedea47a3547 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -243,8 +243,7 @@ struct pcie_link_state {
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
- u32 clkpm_default:1; /* Default Clock PM state by BIOS or
- override */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
};
@@ -376,18 +375,6 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
pcie_set_clkpm_nocheck(link, enable);
}
-static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
- int enabled)
-{
- struct pci_dev *pdev = link->downstream;
-
- /* For devicetree platforms, enable ClockPM by default */
- if (of_have_populated_dt() && !enabled) {
- link->clkpm_default = 1;
- pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
- }
-}
-
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
@@ -410,7 +397,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- pcie_clkpm_override_default_link_state(link, enabled);
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
@@ -811,26 +797,23 @@ static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
struct pci_dev *pdev = link->downstream;
u32 override;
- /* For devicetree platforms, enable all ASPM states by default */
+ /* For devicetree platforms, enable L0s and L1 by default */
if (of_have_populated_dt()) {
- link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
+ if (link->aspm_support & PCIE_LINK_STATE_L0S)
+ link->aspm_default |= PCIE_LINK_STATE_L0S;
+ if (link->aspm_support & PCIE_LINK_STATE_L1)
+ link->aspm_default |= PCIE_LINK_STATE_L1;
override = link->aspm_default & ~link->aspm_enabled;
if (override)
- pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
- FLAG(override, L0S_UP, " L0s-up"),
- FLAG(override, L0S_DW, " L0s-dw"),
- FLAG(override, L1, " L1"),
- FLAG(override, L1_1, " ASPM-L1.1"),
- FLAG(override, L1_2, " ASPM-L1.2"),
- FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
- FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
+ pci_info(pdev, "ASPM: default states%s%s\n",
+ FLAG(override, L0S, " L0s"),
+ FLAG(override, L1, " L1"));
}
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
- u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
struct pci_bus *linkbus = parent->subordinate;
@@ -845,9 +828,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* If ASPM not supported, don't mess with the clocks and link,
* bail out now.
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
- if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
+ if (!(parent->aspm_l0s_support && child->aspm_l0s_support) &&
+ !(parent->aspm_l1_support && child->aspm_l1_support))
return;
/* Configure common clock before checking latencies */
@@ -859,8 +841,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* read-only Link Capabilities may change depending on common clock
* configuration (PCIe r5.0, sec 7.5.3.6).
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
@@ -880,7 +860,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* given link unless components on both sides of the link each
* support L0s.
*/
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
+ if (parent->aspm_l0s_support && child->aspm_l0s_support)
link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
@@ -889,7 +869,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
+ if (parent->aspm_l1_support && child->aspm_l1_support)
link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
@@ -1546,6 +1526,19 @@ int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_enable_link_state_locked);
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap)
+{
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 0;
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 0;
+
+ pci_info(pdev, "ASPM: Link Capabilities%s%s treated as unsupported to avoid device defect\n",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L0S ? " L0s" : "",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L1 ? " L1" : "");
+}
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c83e75a0ec12..9cd032dff31e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -538,14 +538,10 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
}
if (io) {
bridge->io_window = 1;
- pci_read_bridge_io(bridge,
- pci_resource_n(bridge, PCI_BRIDGE_IO_WINDOW),
- true);
+ pci_read_bridge_io(bridge, &res, true);
}
- pci_read_bridge_mmio(bridge,
- pci_resource_n(bridge, PCI_BRIDGE_MEM_WINDOW),
- true);
+ pci_read_bridge_mmio(bridge, &res, true);
/*
* DECchip 21050 pass 2 errata: the bridge may miss an address
@@ -583,10 +579,7 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
bridge->pref_64_window = 1;
}
- pci_read_bridge_mmio_pref(bridge,
- pci_resource_n(bridge,
- PCI_BRIDGE_PREF_MEM_WINDOW),
- true);
+ pci_read_bridge_mmio_pref(bridge, &res, true);
}
void pci_read_bridge_bases(struct pci_bus *child)
@@ -1663,6 +1656,13 @@ void set_pcie_port_type(struct pci_dev *pdev)
if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
pdev->link_active_reporting = 1;
+#ifdef CONFIG_PCIEASPM
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 1;
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 1;
+#endif
+
parent = pci_upstream_bridge(pdev);
if (!parent)
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 214ed060ca1b..b9c252aa6fe0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2494,28 +2494,27 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
- pci_info(dev, "Disabling L0s\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+ pcie_aspm_remove_cap(dev, PCI_EXP_LNKCAP_ASPM_L0S);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
{
- pci_info(dev, "Disabling ASPM L0s/L1\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pcie_aspm_remove_cap(dev,
+ PCI_EXP_LNKCAP_ASPM_L0S | PCI_EXP_LNKCAP_ASPM_L1);
}
/*
@@ -2523,7 +2522,10 @@ static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
* upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
* disable both L0s and L1 for now to be safe.
*/
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x0451, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PASEMI, 0xa002, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0x1105, quirk_disable_aspm_l0s_l1);
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4a8735b275e4..3645f392a9fd 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1604,7 +1604,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- if (pref) {
+ if (pref && (pref->flags & IORESOURCE_PREFETCH)) {
pbus_size_mem(bus,
IORESOURCE_MEM | IORESOURCE_PREFETCH |
(pref->flags & IORESOURCE_MEM_64),
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index b58f94ee4891..436fa7f4c387 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -556,10 +556,8 @@ EXPORT_SYMBOL(vga_put);
static bool vga_is_firmware_default(struct pci_dev *pdev)
{
-#ifdef CONFIG_SCREEN_INFO
- struct screen_info *si = &screen_info;
-
- return pdev == screen_info_pci_dev(si);
+#if defined CONFIG_X86
+ return pdev == screen_info_pci_dev(&screen_info);
#else
return false;
#endif
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 4776013e0764..16a2fd9fdd9b 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -2015,6 +2015,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
/* Program crspace counters to count clock cycles using "count_clock" sysfs */
attr = &pmc->block[blk_num].attr_count_clock;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_count_clock_show;
attr->dev_attr.store = mlxbf_pmc_count_clock_store;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 46e62feeda3c..c122016d82f1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -432,7 +432,7 @@ config WIRELESS_HOTKEY
depends on INPUT
help
This driver provides support for the wireless buttons found on some AMD,
- HP, & Xioami laptops.
+ HP, & Xiaomi laptops.
On such systems the driver should load automatically (via ACPI alias).
To compile this driver as a module, choose M here: the module will
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 31f9643a6a3b..f417dcc9af35 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -210,6 +210,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &g_series_quirks,
},
{
+ .ident = "Dell Inc. G15 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Dell Inc. G16 7630",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1639,7 +1647,7 @@ static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
static int wmax_wmi_suspend(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_suspend(dev);
return 0;
@@ -1647,7 +1655,7 @@ static int wmax_wmi_suspend(struct device *dev)
static int wmax_wmi_resume(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_resume(dev);
return 0;
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 841a5414d28a..28076929d6af 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -365,6 +365,13 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = {
/* Backlight brightness change event */
{ KE_IGNORE, 0x0003, { KEY_RESERVED } },
+ /*
+ * Electronic privacy screen toggled, extended data gives state,
+ * separate entries for on/off; see handling in dell_wmi_process_key().
+ */
+ { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_OFF } },
+ { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_ON } },
+
/* Ultra-performance mode switch request */
{ KE_IGNORE, 0x000d, { KEY_RESERVED } },
@@ -435,6 +442,11 @@ static int dell_wmi_process_key(struct wmi_device *wdev, int type, int code, u16
"Dell tablet mode switch",
SW_TABLET_MODE, !buffer[0]);
return 1;
+ } else if (type == 0x0012 && code == 0x000c && remaining > 0) {
+ /* Eprivacy toggle, switch to "on" key entry for on events */
+ if (buffer[0] == 2)
+ key++;
+ used = 1;
} else if (type == 0x0012 && code == 0x000d && remaining > 0) {
value = (buffer[2] == 2);
used = 1;
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
index 476ec24d3702..9e052b164a1a 100644
--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -245,15 +245,12 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
if (IS_ERR(regulator->rdev))
return PTR_ERR(regulator->rdev);
- int3472->regulators[int3472->n_regulator_gpios].ena_gpio = gpio;
int3472->n_regulator_gpios++;
return 0;
}
void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472)
{
- for (int i = 0; i < int3472->n_regulator_gpios; i++) {
+ for (int i = 0; i < int3472->n_regulator_gpios; i++)
regulator_unregister(int3472->regulators[i].rdev);
- gpiod_put(int3472->regulators[i].ena_gpio);
- }
}
diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c
index f1d6d7b0cb75..b1d84b968112 100644
--- a/drivers/platform/x86/intel/int3472/led.c
+++ b/drivers/platform/x86/intel/int3472/led.c
@@ -43,7 +43,7 @@ int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gp
int3472->pled.lookup.provider = int3472->pled.name;
int3472->pled.lookup.dev_id = int3472->sensor_name;
- int3472->pled.lookup.con_id = "privacy-led";
+ int3472->pled.lookup.con_id = "privacy";
led_add_lookup(&int3472->pled.lookup);
return 0;
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index 8fe1c0a501c9..b5e2ffd5ea64 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -41,7 +41,7 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
- int num_domains, i;
+ int num_domains, i, ret;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
struct scmi_pm_domain *scmi_pd;
@@ -108,9 +108,18 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ ret = of_genpd_add_provider_onecell(np, scmi_pd_data);
+ if (ret)
+ goto err_rm_genpds;
+
dev_set_drvdata(dev, scmi_pd_data);
- return of_genpd_add_provider_onecell(np, scmi_pd_data);
+ return 0;
+err_rm_genpds:
+ for (i = num_domains - 1; i >= 0; i--)
+ pm_genpd_remove(domains[i]);
+
+ return ret;
}
static void scmi_pm_domain_remove(struct scmi_device *sdev)
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 33991f3c6b55..a34b260274f7 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -536,6 +536,8 @@ static void imx_gpc_remove(struct platform_device *pdev)
return;
}
}
+
+ of_node_put(pgc_node);
}
static struct platform_driver imx_gpc_driver = {
diff --git a/drivers/pmdomain/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c
index 5d478bb37ad6..5c3aa8983087 100644
--- a/drivers/pmdomain/samsung/exynos-pm-domains.c
+++ b/drivers/pmdomain/samsung/exynos-pm-domains.c
@@ -92,13 +92,14 @@ static const struct of_device_id exynos_pm_domain_of_match[] = {
{ },
};
-static const char *exynos_get_domain_name(struct device_node *node)
+static const char *exynos_get_domain_name(struct device *dev,
+ struct device_node *node)
{
const char *name;
if (of_property_read_string(node, "label", &name) < 0)
name = kbasename(node->full_name);
- return kstrdup_const(name, GFP_KERNEL);
+ return devm_kstrdup_const(dev, name, GFP_KERNEL);
}
static int exynos_pd_probe(struct platform_device *pdev)
@@ -115,20 +116,27 @@ static int exynos_pd_probe(struct platform_device *pdev)
if (!pd)
return -ENOMEM;
- pd->pd.name = exynos_get_domain_name(np);
+ pd->pd.name = exynos_get_domain_name(dev, np);
if (!pd->pd.name)
return -ENOMEM;
pd->base = of_iomap(np, 0);
- if (!pd->base) {
- kfree_const(pd->pd.name);
+ if (!pd->base)
return -ENODEV;
- }
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+ /*
+ * Some Samsung platforms with bootloaders turning on the splash-screen
+ * and handing it over to the kernel require the power-domains to be
+ * reset during boot.
+ */
+ if (IS_ENABLED(CONFIG_ARM) &&
+ of_device_is_compatible(np, "samsung,exynos4210-pd"))
+ exynos_pd_power_off(&pd->pd);
+
on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
pm_genpd_init(&pd->pd, NULL, !on);
@@ -147,15 +155,6 @@ static int exynos_pd_probe(struct platform_device *pdev)
parent.np, child.np);
}
- /*
- * Some Samsung platforms with bootloaders turning on the splash-screen
- * and handing it over to the kernel, requires the power-domains to be
- * reset during boot. As a temporary hack to manage this, let's enforce
- * a sync_state.
- */
- if (!ret)
- of_genpd_sync_state(np);
-
pm_runtime_enable(dev);
return ret;
}
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 8106eb617c8c..c61cf9edac48 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -561,10 +561,14 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
return ptp_mask_en_single(pccontext->private_clkdata, argptr);
case PTP_SYS_OFFSET_PRECISE_CYCLES:
+ if (!ptp->has_cycles)
+ return -EOPNOTSUPP;
return ptp_sys_offset_precise(ptp, argptr,
ptp->info->getcrosscycles);
case PTP_SYS_OFFSET_EXTENDED_CYCLES:
+ if (!ptp->has_cycles)
+ return -EOPNOTSUPP;
return ptp_sys_offset_extended(ptp, argptr,
ptp->info->getcyclesx64);
default:
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 794ec6e71990..a5c363252986 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2548,7 +2548,7 @@ ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
for (i = 0; i < OCP_SMA_NUM; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
- bp->sma[1].dpll_prop.capabilities &=
+ bp->sma[i].dpll_prop.capabilities &=
~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
}
return;
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 022d98f3c32a..ea9c4058ee6a 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1613,6 +1613,8 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np,
step /= r1;
new[j].min = min;
+ new[j].min_sel = desc->linear_ranges[j].min_sel;
+ new[j].max_sel = desc->linear_ranges[j].max_sel;
new[j].step = step;
dev_dbg(dev, "%s: old range min %d, step %d\n",
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 1cb647ed70c6..a2d16e9abfb5 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -334,6 +334,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
"Failed to register regulator: %ld\n",
PTR_ERR(drvdata->dev));
+ gpiod_put(cfg.ena_gpiod);
return ret;
}
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 8b6b35716f53..c170345ac076 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -268,7 +268,6 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
return err;
rtc->alarm_irq = platform_get_irq(pdev, 0);
- rtc->alarm_enabled = true;
err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL,
cpcap_rtc_alarm_irq,
IRQF_TRIGGER_NONE | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index aabe62c283a1..7e9f7cb90c28 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -316,7 +316,7 @@ static int rx8025_init_client(struct i2c_client *client)
return hour_reg;
rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
} else {
- rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224);
+ rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224);
}
out:
return err;
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 76ecf7b798f0..54c8429b16bf 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -258,7 +258,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
- rtc->irq_en = true;
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
IRQF_ONESHOT,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cc5d05dc395c..17173239301e 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -611,8 +611,9 @@ int scsi_host_busy(struct Scsi_Host *shost)
{
int cnt = 0;
- blk_mq_tagset_busy_iter(&shost->tag_set,
- scsi_host_check_in_flight, &cnt);
+ if (shost->tag_set.ops)
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 16d0f02af1e4..31d08c115521 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
host_bcode = FC_ERROR;
goto err;
}
- if (offset + len > fsp->data_len) {
+ if (size_add(offset, len) > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a761c0aa5127..83ff66f954e6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
* The mid-level driver tries to ensure that queuecommand never gets
* invoked concurrently with itself or the interrupt handler (although
* the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
cmd = scsi_host_find_tag(ha->host, index);
/*
* We cannot just check if the index is valid,
- * becase if we are run from the scsi eh, then
+ * because if we are run from the scsi eh, then
* the scsi/block layer is going to prevent
* the tag from being released.
*/
@@ -4952,7 +4952,7 @@ recover_ha_init_adapter:
/* Upon successful firmware/chip reset, re-initialize the adapter */
if (status == QLA_SUCCESS) {
/* For ISP-4xxx, force function 1 to always initialize
- * before function 3 to prevent both funcions from
+ * before function 3 to prevent both functions from
* stepping on top of the other */
if (is_qla40XX(ha) && (ha->mac_index == 3))
ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry = NULL;
/* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
+ * the target_id would get set when we issue the login
*/
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
cmds_max, sizeof(struct ddb_entry),
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 746ff6a1f309..1c13812a3f03 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -554,9 +554,9 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* happened, even if someone else gets the sense data.
*/
if (sshdr.asc == 0x28)
- scmd->device->ua_new_media_ctr++;
+ atomic_inc(&sdev->ua_new_media_ctr);
else if (sshdr.asc == 0x29)
- scmd->device->ua_por_ctr++;
+ atomic_inc(&sdev->ua_por_ctr);
}
if (scsi_sense_is_deferred(&sshdr))
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 567f9cd29102..6e4112143c76 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
}
/*
- * Our channel array is sparsley populated and we
+ * Our channel array could be sparsely populated and we
* initiated I/O on a processor/hw-q that does not
* currently have a designated channel. Fix this.
* The strategy is simple:
- * I. Ensure NUMA locality
- * II. Distribute evenly (best effort)
+ * I. Prefer the channel associated with the current CPU
+ * II. Ensure NUMA locality
+ * III. Distribute evenly (best effort)
*/
+ /* Prefer the channel on the I/O issuing processor/hw-q */
+ if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+ return stor_device->stor_chns[q_num];
+
node_mask = cpumask_of_node(cpu_to_node(q_num));
num_channels = 0;
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
/* See storvsc_change_target_cpu(). */
outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
if (outgoing_channel != NULL) {
- if (outgoing_channel->target_cpu == q_num) {
- /*
- * Ideally, we want to pick a different channel if
- * available on the same NUMA node.
- */
- node_mask = cpumask_of_node(cpu_to_node(q_num));
- for_each_cpu_wrap(tgt_cpu,
- &stor_device->alloced_cpus, q_num + 1) {
- if (!cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- if (tgt_cpu == q_num)
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
- }
+ if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+ > ring_avail_percent_lowater)
+ goto found_channel;
- /*
- * All the other channels on the same NUMA node are
- * busy. Try to use the channel on the current CPU
- */
- if (hv_get_avail_to_write_percent(
- &outgoing_channel->outbound)
- > ring_avail_percent_lowater)
+ /*
+ * Channel is busy, try to find a channel on the same NUMA node
+ */
+ node_mask = cpumask_of_node(cpu_to_node(q_num));
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ if (!cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
goto found_channel;
+ }
+ }
- /*
- * If we reach here, all the channels on the current
- * NUMA node are busy. Try to find a channel in
- * other NUMA nodes
- */
- for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
- if (cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
+ /*
+ * If we reach here, all the channels on the current
+ * NUMA node are busy. Try to find a channel in
+ * all NUMA nodes
+ */
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
+ goto found_channel;
}
}
+ /*
+ * If we reach here, all the channels are busy. Use the
+ * original channel found.
+ */
} else {
spin_lock_irqsave(&stor_device->lock, flags);
outgoing_channel = stor_device->stor_chns[q_num];
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index a25ebe6cd503..553ae7ee20f1 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -402,7 +402,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
* @name: slave channel name
* @config: dma configuration parameters
*
- * Returns pointer to appropriate DMA channel on success or error.
+ * Return: Pointer to appropriate DMA channel on success or NULL on error.
*/
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
@@ -414,13 +414,13 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!kdev) {
pr_err("keystone-navigator-dma driver not registered\n");
- return (void *)-EINVAL;
+ return NULL;
}
chan_num = of_channel_match_helper(dev->of_node, name, &instance);
if (chan_num < 0) {
dev_err(kdev->dev, "No DMA instance with name %s\n", name);
- return (void *)-EINVAL;
+ return NULL;
}
dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
@@ -431,7 +431,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (config->direction != DMA_MEM_TO_DEV &&
config->direction != DMA_DEV_TO_MEM) {
dev_err(kdev->dev, "bad direction\n");
- return (void *)-EINVAL;
+ return NULL;
}
/* Look for correct dma instance */
@@ -443,7 +443,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
- return (void *)-EINVAL;
+ return NULL;
}
/* Look for correct dma channel from dma instance */
@@ -463,14 +463,14 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
- return (void *)-EINVAL;
+ return NULL;
}
if (atomic_read(&chan->ref_count) >= 1) {
if (!check_config(chan, config)) {
dev_err(kdev->dev, "channel %d config miss-match\n",
chan_num);
- return (void *)-EINVAL;
+ return NULL;
}
}
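
The knav_dma_open_channel() conversion above switches the error convention from casted errno values to plain NULL. As a hedged illustration only — the caller function, channel name and include path below are assumptions, not code from this series — the caller-side check reduces to a simple pointer test, with the detailed failure reason already logged by the core:

#include <linux/device.h>
#include <linux/soc/ti/knav_dma.h>

/* Hypothetical caller: NULL now signals failure, so no IS_ERR()
 * decoding of a casted errno is needed.
 */
static int example_open_tx_channel(struct device *dev,
				   struct knav_dma_cfg *cfg, void **chan)
{
	*chan = knav_dma_open_channel(dev, "nettx", cfg);
	if (!*chan)
		return -ENODEV;	/* choose a locally meaningful errno */
	return 0;
}
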
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index dbe640986825..b78163eaed61 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -192,6 +192,14 @@
#define SPI_NAND_OP_RESET 0xff
#define SPI_NAND_OP_DIE_SELECT 0xc2
+/* SNAND FIFO commands */
+#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08
+#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09
+#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a
+#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c
+#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e
+#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f
+
#define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256)
#define SPI_MAX_TRANSFER_SIZE 511
@@ -387,10 +395,26 @@ static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl,
return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
}
-static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
- const u8 *data, int len)
+static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl,
+ const u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
@@ -409,16 +433,32 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
return 0;
}
-static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
- int len)
+static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl,
+ u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
- err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
+ err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -618,6 +658,10 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (desc->info.offset + desc->info.length > U32_MAX)
return -EINVAL;
+ /* continuous reading is not supported */
+ if (desc->info.length > SPI_NAND_CACHE_SIZE)
+ return -E2BIG;
+
if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
@@ -654,13 +698,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
err = airoha_snand_nfi_config(as_ctrl);
if (err)
- return err;
+ goto error_dma_mode_off;
dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
err = dma_mapping_error(as_ctrl->dev, dma_addr);
if (err)
- return err;
+ goto error_dma_mode_off;
/* set dma addr */
err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
@@ -689,8 +733,9 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- /* set read addr */
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
+ /* set read addr: zero page offset + descriptor read offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -760,6 +805,8 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
+error_dma_mode_off:
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
@@ -824,7 +871,9 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
+ /* set write addr: zero page offset + descriptor write offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -892,18 +941,35 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_TO_DEVICE);
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
+ int op_len, addr_len, dummy_len;
+ u8 buf[20], *data;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
+ op_len = op->cmd.nbytes;
+ addr_len = op->addr.nbytes;
+ dummy_len = op->dummy.nbytes;
+
+ if (op_len + dummy_len + addr_len > sizeof(buf))
+ return -EIO;
+
+ data = buf;
+ for (i = 0; i < op_len; i++)
+ *data++ = op->cmd.opcode >> (8 * (op_len - i - 1));
+ for (i = 0; i < addr_len; i++)
+ *data++ = op->addr.val >> (8 * (addr_len - i - 1));
+ for (i = 0; i < dummy_len; i++)
+ *data++ = 0xff;
+
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
if (err < 0)
@@ -914,40 +980,40 @@ static int airoha_snand_exec_op(struct spi_mem *mem,
return err;
/* opcode */
- err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode));
+ data = buf;
+ err = airoha_snand_write_data(as_ctrl, data, op_len,
+ op->cmd.buswidth);
if (err)
return err;
/* addr part */
- cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
- put_unaligned_be64(op->addr.val, data);
-
- for (i = ARRAY_SIZE(data) - op->addr.nbytes;
- i < ARRAY_SIZE(data); i++) {
- err = airoha_snand_write_data(as_ctrl, cmd, &data[i],
- sizeof(data[0]));
+ data += op_len;
+ if (addr_len) {
+ err = airoha_snand_write_data(as_ctrl, data, addr_len,
+ op->addr.buswidth);
if (err)
return err;
}
/* dummy */
- data[0] = 0xff;
- for (i = 0; i < op->dummy.nbytes; i++) {
- err = airoha_snand_write_data(as_ctrl, 0x8, &data[0],
- sizeof(data[0]));
+ data += addr_len;
+ if (dummy_len) {
+ err = airoha_snand_write_data(as_ctrl, data, dummy_len,
+ op->dummy.buswidth);
if (err)
return err;
}
/* data */
- if (op->data.dir == SPI_MEM_DATA_IN) {
- err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
- op->data.nbytes);
- if (err)
- return err;
- } else {
- err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out,
- op->data.nbytes);
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
+ op->data.nbytes,
+ op->data.buswidth);
+ else
+ err = airoha_snand_write_data(as_ctrl, op->data.buf.out,
+ op->data.nbytes,
+ op->data.buswidth);
if (err)
return err;
}
diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c
index 4338d00e56a6..35a7c4965e11 100644
--- a/drivers/spi/spi-amlogic-spifc-a4.c
+++ b/drivers/spi/spi-amlogic-spifc-a4.c
@@ -286,7 +286,7 @@ static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
for (i = 0; i <= LANE_MAX; i++) {
if (buswidth == 1 << i) {
- conf = i << __bf_shf(mask);
+ conf = i << __ffs(mask);
return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
mask, conf);
}
@@ -566,7 +566,7 @@ static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
if (!op->data.nbytes)
goto end_xfer;
- conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE);
+ conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE);
ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
if (ret)
goto err_out;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 8fb13df8ff87..81017402bc56 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1995,7 +1995,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
- goto probe_setup_failed;
+ goto probe_dma_failed;
}
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
@@ -2019,9 +2019,10 @@ static int cqspi_probe(struct platform_device *pdev)
return 0;
probe_setup_failed:
- cqspi_controller_enable(cqspi, 0);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_disable(dev);
+probe_dma_failed:
+ cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index f0f576fac77a..7a5197586919 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -358,7 +358,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (IS_ERR(dwsmmio->rstc))
return PTR_ERR(dwsmmio->rstc);
- reset_control_deassert(dwsmmio->rstc);
+ ret = reset_control_deassert(dwsmmio->rstc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n");
dws->bus_num = pdev->id;
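
The spi-dw-mmio hunk above (and the matching 8250_dw change later in this series) starts honouring the return value of reset_control_deassert(). A minimal probe-path sketch of the same pattern, with a hypothetical driver and an optional reset line assumed:

#include <linux/platform_device.h>
#include <linux/reset.h>

/* Hypothetical probe: a failed deassert is a probe error; dev_err_probe()
 * logs it and returns the error code unchanged.
 */
static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_deassert(rst);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to deassert resets\n");

	return 0;
}
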
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 155ddeb8fcd4..bbf1fd4fe1e9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -519,9 +519,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
- reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
- reg |= MX51_ECSPI_CTRL_XCH;
- writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+ if (spi_imx->usedma) {
+ reg = readl(spi_imx->base + MX51_ECSPI_DMA);
+ reg |= MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN;
+ writel(reg, spi_imx->base + MX51_ECSPI_DMA);
+ } else {
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+ }
}
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
@@ -759,7 +765,6 @@ static void mx51_setup_wml(struct spi_imx_data *spi_imx)
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(tx_wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
- MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
@@ -1520,6 +1525,8 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
reinit_completion(&spi_imx->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
+ spi_imx->devtype_data->trigger(spi_imx);
+
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
/* Wait SDMA to finish the data transfer.*/
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 4b63cb98df9c..b8c572394aac 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -75,10 +75,13 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4d23), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x5825), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 13bbb2133507..1775ad39e633 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -132,6 +132,7 @@
#define FLCOMP_C0DEN_16M 0x05
#define FLCOMP_C0DEN_32M 0x06
#define FLCOMP_C0DEN_64M 0x07
+#define FLCOMP_C0DEN_128M 0x08
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -1347,7 +1348,12 @@ static int intel_spi_read_desc(struct intel_spi *ispi)
case FLCOMP_C0DEN_64M:
ispi->chip0_size = SZ_64M;
break;
+ case FLCOMP_C0DEN_128M:
+ ispi->chip0_size = SZ_128M;
+ break;
default:
+ dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n",
+ flcomp & FLCOMP_C0DEN_MASK);
return -EINVAL;
}
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index f9371f98a65b..b6c79e50d842 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -404,6 +404,10 @@ struct nxp_fspi {
#define FSPI_NEED_INIT BIT(0)
#define FSPI_DTR_MODE BIT(1)
int flags;
+ /* clock rate used by the previous operation */
+ unsigned long pre_op_rate;
+ /* the maximum clock rate the FSPI can output to the device */
+ unsigned long max_rate;
};
static inline int needs_ip_only(struct nxp_fspi *f)
@@ -685,10 +689,13 @@ static void nxp_fspi_select_rx_sample_clk_source(struct nxp_fspi *f,
* change the mode back to mode 0.
*/
reg = fspi_readl(f, f->iobase + FSPI_MCR0);
- if (op_is_dtr)
+ if (op_is_dtr) {
reg |= FSPI_MCR0_RXCLKSRC(3);
- else /*select mode 0 */
+ f->max_rate = 166000000;
+ } else { /*select mode 0 */
reg &= ~FSPI_MCR0_RXCLKSRC(3);
+ f->max_rate = 66000000;
+ }
fspi_writel(f, reg, f->iobase + FSPI_MCR0);
}
@@ -719,6 +726,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
0, POLL_TOUT, true);
if (ret)
dev_warn(f->dev, "DLL lock failed, please fix it!\n");
+
+ /*
+ * For ERR050272, the DLL lock status bit is not accurate, so
+ * wait 4us more as a workaround.
+ */
+ udelay(4);
}
/*
@@ -780,11 +793,17 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
uint64_t size_kb;
/*
- * Return, if previously selected target device is same as current
- * requested target device. Also the DTR or STR mode do not change.
+ * Return early when all of the following conditions are met:
+ * 1. the previously selected target device is the same as the
+ *    currently requested target device;
+ * 2. the DTR or STR mode does not change;
+ * 3. the previous operation's max rate equals the current one.
+ *
+ * Otherwise the controller needs to be re-configured.
*/
if ((f->selected == spi_get_chipselect(spi, 0)) &&
- (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr))
+ (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr) &&
+ (f->pre_op_rate == op->max_freq))
return;
/* Reset FLSHxxCR0 registers */
@@ -802,6 +821,7 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0));
nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr);
+ rate = min(f->max_rate, op->max_freq);
if (op_is_dtr) {
f->flags |= FSPI_DTR_MODE;
@@ -832,6 +852,8 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
else
nxp_fspi_dll_override(f);
+ f->pre_op_rate = op->max_freq;
+
f->selected = spi_get_chipselect(spi, 0);
}
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 9eba5c0a60f2..b3c2b03b1153 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -704,7 +704,12 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_dma;
}
- sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ sfc->dma_buffer = dma_map_single(dev, sfc->buffer,
+ sfc->max_iosize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, sfc->dma_buffer)) {
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
}
ret = devm_spi_register_controller(dev, host);
@@ -715,6 +720,9 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
return 0;
err_register:
+ dma_unmap_single(dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
+err_dma_map:
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
err_dma:
pm_runtime_get_sync(dev);
@@ -736,6 +744,8 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ dma_unmap_single(&pdev->dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
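
The rockchip-sfc change above replaces virt_to_phys() with a streaming DMA mapping that is checked and later unmapped. The generic map/check/unmap pattern, sketched here with an assumed helper name (not code from the patch):

#include <linux/dma-mapping.h>

/* Illustrative helper: map, validate with dma_mapping_error(), and
 * unmap with the same length and direction once the transfer is done.
 */
static int example_do_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the controller with 'dma' and run the transfer ... */

	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
	return 0;
}
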
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d59cc8a18484..c86dc56f38b4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -300,7 +300,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
- stalled = 10;
+ stalled = 32;
while (rx_words) {
if (rx_words == n_words && !(stalled--) &&
!(sr & XSPI_SR_TX_EMPTY_MASK) &&
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e0647a06890..e25df9990f82 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2851,6 +2851,18 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
+ /*
+ * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
+ * the GPIO controller does not have a driver yet. This needs to be done
+ * here too, because this call sets the GPIO direction and/or bias.
+ * Setting these needs to be done even if there is no driver, in which
+ * case spi_probe() will never get called.
+ * TODO: ideally the setup of the GPIO should be handled in a generic
+ * manner in the ACPI/gpiolib core code.
+ */
+ if (spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+
acpi_device_set_enumerated(adev);
adev->power.flags.ignore_parent = true;
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 94bbb3b6576d..01a5bb43cd2d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
return retval;
#endif
- retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
- *bytes_written += num_bytes;
- if (retval < 0)
- return retval;
+ if (fifotransferlength > 0) {
+ retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
+ *bytes_written += num_bytes;
+ if (retval < 0)
+ return retval;
+ }
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
@@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
break;
}
write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
- if (retval)
+ if (retval < 0)
return retval;
if (send_eoi) {
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 164dcfc3c9ef..f7bfb4a8e553 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board)
resource_size(e_priv->gpib_iomem_res));
}
fmh_gpib_generic_detach(board);
+
+ if (board->dev) {
+ put_device(board->dev);
+ board->dev = NULL;
+ }
}
static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index 4dec87d12687..1f8412de9fa3 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_
board->status &= ~clear_mask;
board->status &= ~ni_usb_ibsta_mask;
board->status |= ni_usb_ibsta & ni_usb_ibsta_mask;
- // FIXME should generate events on DTAS and DCAS
+ if (ni_usb_ibsta & DCAS)
+ push_gpib_event(board, EVENT_DEV_CLR);
+ if (ni_usb_ibsta & DTAS)
+ push_gpib_event(board, EVENT_DEV_TRG);
spin_lock_irqsave(&board->spinlock, flags);
/* remove set status bits from monitored set why ?***/
@@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
*/
break;
case NIUSB_ATN_STATE_ERROR:
- retval = -EIO;
- dev_err(&usb_dev->dev, "read when ATN set\n");
+ if (status.ibsta & DCAS) {
+ retval = -EINTR;
+ } else {
+ retval = -EIO;
+ dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta);
+ }
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig
index 927686abceb1..9f19dee08db4 100644
--- a/drivers/tee/qcomtee/Kconfig
+++ b/drivers/tee/qcomtee/Kconfig
@@ -2,6 +2,7 @@
# Qualcomm Trusted Execution Environment Configuration
config QCOMTEE
tristate "Qualcomm TEE Support"
+ depends on ARCH_QCOM || COMPILE_TEST
depends on !CPU_BIG_ENDIAN
select QCOM_SCM
select QCOM_TZMEM_MODE_SHMBRIDGE
diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c
index cc17a48d0ab7..ac134452cc9c 100644
--- a/drivers/tee/qcomtee/call.c
+++ b/drivers/tee/qcomtee/call.c
@@ -308,7 +308,7 @@ out_failed:
}
/* Release any IO and OO objects not processed. */
- for (; u[i].type && i < num_params; i++) {
+ for (; i < num_params && u[i].type; i++) {
if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
u[i].type == QCOMTEE_ARG_TYPE_IO)
qcomtee_object_put(u[i].o);
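
The one-line qcomtee fix above only swaps the operands of &&; a standalone illustration of why that matters (hypothetical array, plain C semantics, not driver code):

/* && short-circuits left to right, so the bounds test must come first:
 * v[n] is never read because the loop stops as soon as i reaches n.
 */
static int count_leading_nonzero(const int *v, int n)
{
	int i;

	for (i = 0; i < n && v[i]; i++)
		;
	return i;
}
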
diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c
index 783acc59cfa9..b6715ada7700 100644
--- a/drivers/tee/qcomtee/core.c
+++ b/drivers/tee/qcomtee/core.c
@@ -424,7 +424,7 @@ static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
memcpy(msgptr, u[i].b.addr, u[i].b.size);
else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
- return -EINVAL;
+ return -EFAULT;
offset += qcomtee_msg_offset_align(u[i].b.size);
ib++;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a53ba04d9770..710ae4d40aec 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -635,7 +635,9 @@ static int dw8250_probe(struct platform_device *pdev)
if (IS_ERR(data->rst))
return PTR_ERR(data->rst);
- reset_control_deassert(data->rst);
+ err = reset_control_deassert(data->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to deassert resets\n");
err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
if (err)
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 04a0cbab02c2..b9cc0b786ca6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -40,6 +40,8 @@
#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db
#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea
+#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018
+
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -1622,6 +1624,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = {
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_adv_XR17V352 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
@@ -1696,6 +1704,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+ /* ADVANTECH devices */
+ EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352),
+
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b44de2ed7413..5875a7b9b4b1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
while
(serial_in(up, MTK_UART_DEBUG0));
+ clk_disable_unprepare(data->uart_clk);
clk_disable_unprepare(data->bus_clk);
return 0;
@@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
struct mtk8250_data *data = dev_get_drvdata(dev);
clk_prepare_enable(data->bus_clk);
+ clk_prepare_enable(data->uart_clk);
return 0;
}
@@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
int dmacnt;
#endif
- data->uart_clk = devm_clk_get(&pdev->dev, "baud");
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(data->uart_clk)) {
/*
* For compatibility with older device trees try unnamed
* clk when no baud clk can be found.
*/
- data->uart_clk = devm_clk_get(&pdev->dev, NULL);
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(data->uart_clk)) {
dev_warn(&pdev->dev, "Can't get uart clock\n");
return PTR_ERR(data->uart_clk);
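
The 8250_mtk hunk above moves to devm_clk_get_enabled(). A hedged sketch of the helper's semantics with a hypothetical probe function: the call combines devm_clk_get() with clk_prepare_enable(), and the clock is disabled, unprepared and released automatically when the driver detaches.

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment using the managed get-and-enable helper. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *baud = devm_clk_get_enabled(&pdev->dev, "baud");

	if (IS_ERR(baud))
		return dev_err_probe(&pdev->dev, PTR_ERR(baud),
				     "Can't get baud clock\n");
	return 0;
}
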
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 1a2c4c14f6aa..c7435595dce1 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -588,13 +588,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= prescaler;
}
- /* Enable enhanced features */
- sc16is7xx_efr_lock(port);
- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
- SC16IS7XX_EFR_ENABLE_BIT,
- SC16IS7XX_EFR_ENABLE_BIT);
- sc16is7xx_efr_unlock(port);
-
/* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 538b2f991609..62bb62b82cbe 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1014,16 +1014,18 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
struct sci_port *s = to_sci_port(port);
const struct plat_sci_reg *reg;
int copied = 0;
- u16 status;
+ u32 status;
- reg = sci_getreg(port, s->params->overrun_reg);
- if (!reg->size)
- return 0;
+ if (s->type != SCI_PORT_RSCI) {
+ reg = sci_getreg(port, s->params->overrun_reg);
+ if (!reg->size)
+ return 0;
+ }
- status = sci_serial_in(port, s->params->overrun_reg);
+ status = s->ops->read_reg(port, s->params->overrun_reg);
if (status & s->params->overrun_mask) {
status &= ~s->params->overrun_mask;
- sci_serial_out(port, s->params->overrun_reg, status);
+ s->ops->write_reg(port, s->params->overrun_reg, status);
port->icount.overrun++;
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index c040afc6668e..0086816b27cd 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -1949,7 +1949,7 @@ static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
return hba->dev_info.hid_sup ? attr->mode : 0;
}
-const struct attribute_group ufs_sysfs_hid_group = {
+static const struct attribute_group ufs_sysfs_hid_group = {
.name = "hid",
.attrs = ufs_sysfs_hid,
.is_visible = ufs_sysfs_hid_is_visible,
diff --git a/drivers/ufs/core/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h
index 6efb82a082fd..8d94af3b8077 100644
--- a/drivers/ufs/core/ufs-sysfs.h
+++ b/drivers/ufs/core/ufs-sysfs.h
@@ -14,6 +14,5 @@ void ufs_sysfs_remove_nodes(struct device *dev);
extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
extern const struct attribute_group ufs_sysfs_lun_attributes_group;
-extern const struct attribute_group ufs_sysfs_hid_group;
#endif
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8339fec975b9..d6a060a72461 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -4282,8 +4282,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel),
UFS_UIC_COMMAND_RETRIES - retries);
- if (mib_val && !ret)
- *mib_val = uic_cmd.argument3;
+ if (mib_val)
+ *mib_val = ret == 0 ? uic_cmd.argument3 : 0;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
&& pwr_mode_change)
@@ -4999,7 +4999,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
- int tx_lanes = 0, i, err = 0;
+ int tx_lanes, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -5066,7 +5066,8 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
* If UFS device isn't active then we will have to issue link startup
* 2 times to make sure the device state move to active.
*/
- if (!ufshcd_is_ufs_dev_active(hba))
+ if (!(hba->quirks & UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE) &&
+ !ufshcd_is_ufs_dev_active(hba))
link_startup_again = true;
link_startup:
@@ -5131,12 +5132,8 @@ link_startup:
ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret) {
+ if (ret)
dev_err(hba->dev, "link startup failed %d\n", ret);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_evt_hist(hba);
- }
return ret;
}
@@ -6673,6 +6670,20 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+ /*
+ * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
+ * even if an error occurs during runtime suspend or runtime resume.
+ * This avoids potential deadlocks that could happen if we tried to
+ * resume the device while a PM operation is already in progress.
+ */
+ ufshcd_rpm_get_noresume(hba);
+ if (hba->pm_op_in_progress) {
+ ufshcd_link_recovery(hba);
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ ufshcd_rpm_put(hba);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -6684,14 +6695,6 @@ static void ufshcd_err_handler(struct work_struct *work)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_rpm_get_noresume(hba);
- if (hba->pm_op_in_progress) {
- ufshcd_link_recovery(hba);
- ufshcd_rpm_put(hba);
- return;
- }
- ufshcd_rpm_put(hba);
-
ufshcd_err_handling_prepare(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -8497,8 +8500,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) &
UFS_DEV_HID_SUPPORT;
- sysfs_update_group(&hba->dev->kobj, &ufs_sysfs_hid_group);
-
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -10655,7 +10656,7 @@ remove_scsi_host:
* @mmio_base: base register address
* @irq: Interrupt line of device
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 on success; < 0 on failure.
*/
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
@@ -10885,8 +10886,8 @@ initialized:
if (err)
goto out_disable;
- async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
+ async_schedule(ufshcd_async_scan, hba);
device_enable_async_suspend(dev);
ufshcd_pm_qos_init(hba);
@@ -10896,7 +10897,7 @@ out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
out_error:
- return err;
+ return err > 0 ? -EIO : err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 3e83dc51d538..eba0e6617483 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -740,8 +740,21 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
/* reset the connected UFS device during power down */
- if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ if (ufs_qcom_is_link_off(hba) && host->device_reset) {
ufs_qcom_device_reset_ctrl(hba, true);
+ /*
+ * After sending the SSU command, asserting the rst_n
+ * line causes the device firmware to wake up and
+ * execute its reset routine.
+ *
+ * During this process, the device may draw current
+ * beyond the permissible limit for low-power mode (LPM).
+ * A 10ms delay, based on experimental observations,
+ * allows the UFS device to complete its hardware reset
+ * before transitioning the power rail to LPM.
+ */
+ usleep_range(10000, 11000);
+ }
return ufs_qcom_ice_suspend(host);
}
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index b87e03777395..5f65dfad1a71 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
@@ -31,6 +32,7 @@ struct intel_host {
u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
+ int saved_spm_lvl;
struct dentry *debugfs_root;
struct gpio_desc *reset_gpio;
};
@@ -347,6 +349,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
+ host->saved_spm_lvl = -1;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
if (INTEL_DSM_SUPPORTED(host, RESET)) {
@@ -425,7 +428,8 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
static int ufs_intel_adl_init(struct ufs_hba *hba)
{
hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 |
+ UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE;
hba->caps |= UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
@@ -538,6 +542,66 @@ static int ufshcd_pci_restore(struct device *dev)
return ufshcd_system_resume(dev);
}
+
+static int ufs_intel_suspend_prepare(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct intel_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ /*
+ * Only s2idle (S0ix) retains link state. Force power-off
+ * (UFS_PM_LVL_5) for any other case.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && hba->spm_lvl < UFS_PM_LVL_5) {
+ host->saved_spm_lvl = hba->spm_lvl;
+ hba->spm_lvl = UFS_PM_LVL_5;
+ }
+
+ err = ufshcd_suspend_prepare(dev);
+
+ if (err < 0 && host->saved_spm_lvl != -1) {
+ hba->spm_lvl = host->saved_spm_lvl;
+ host->saved_spm_lvl = -1;
+ }
+
+ return err;
+}
+
+static void ufs_intel_resume_complete(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_resume_complete(dev);
+
+ if (host->saved_spm_lvl != -1) {
+ hba->spm_lvl = host->saved_spm_lvl;
+ host->saved_spm_lvl = -1;
+ }
+}
+
+static int ufshcd_pci_suspend_prepare(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!strcmp(hba->vops->name, "intel-pci"))
+ return ufs_intel_suspend_prepare(dev);
+
+ return ufshcd_suspend_prepare(dev);
+}
+
+static void ufshcd_pci_resume_complete(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!strcmp(hba->vops->name, "intel-pci")) {
+ ufs_intel_resume_complete(dev);
+ return;
+ }
+
+ ufshcd_resume_complete(dev);
+}
#endif
/**
@@ -611,8 +675,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
.thaw = ufshcd_system_resume,
.poweroff = ufshcd_system_suspend,
.restore = ufshcd_pci_restore,
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
+ .prepare = ufshcd_pci_suspend_prepare,
+ .complete = ufshcd_pci_resume_complete,
#endif
};
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5bc53875330..47f589c4104a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -467,6 +467,8 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c1), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
{ USB_DEVICE(0x12d1, 0x15c3), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
diff --git a/drivers/usb/dwc3/dwc3-generic-plat.c b/drivers/usb/dwc3/dwc3-generic-plat.c
index d96b20570002..f8ad79c08c4e 100644
--- a/drivers/usb/dwc3/dwc3-generic-plat.c
+++ b/drivers/usb/dwc3/dwc3-generic-plat.c
@@ -85,11 +85,8 @@ static int dwc3_generic_probe(struct platform_device *pdev)
static void dwc3_generic_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
- struct dwc3_generic *dwc3g = to_dwc3_generic(dwc);
dwc3_core_remove(dwc);
-
- clk_bulk_disable_unprepare(dwc3g->num_clocks, dwc3g->clks);
}
static int dwc3_generic_suspend(struct device *dev)
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 20165e1582d9..b71680c58de6 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -667,8 +667,6 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
return ERR_PTR(-EINVAL);
if (!usb_raw_io_flags_valid(io->flags))
return ERR_PTR(-EINVAL);
- if (io->length > PAGE_SIZE)
- return ERR_PTR(-EINVAL);
if (get_from_user)
data = memdup_user(ptr + sizeof(*io), io->length);
else {
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 63edf2d8f245..ecda964e018a 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -892,7 +892,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC configured\n");
portsc = readl(&dbc->regs->portsc);
writel(portsc, &dbc->regs->portsc);
- return EVT_GSER;
+ ret = EVT_GSER;
+ break;
}
return EVT_DONE;
@@ -954,7 +955,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
- ret = EVT_XFER_DONE;
+ if (ret != EVT_GSER)
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -1390,8 +1392,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci)
if (!dbc)
return 0;
- if (dbc->state == DS_CONFIGURED)
+ switch (dbc->state) {
+ case DS_ENABLED:
+ case DS_CONNECTED:
+ case DS_CONFIGURED:
dbc->resume_required = 1;
+ break;
+ default:
+ break;
+ }
xhci_dbc_stop(dbc);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5c8ab519f497..f67a4d956204 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -582,6 +582,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
+ xhci->allow_single_roothub = 1;
+
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
@@ -637,7 +639,6 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
xhci = hcd_to_xhci(hcd);
xhci->reset = reset;
- xhci->allow_single_roothub = 1;
if (!xhci_has_one_roothub(xhci)) {
xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
pci_name(dev), hcd);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 09ac6f1c985f..0b56b773dbdf 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -182,6 +182,7 @@ config USB_LJCA
config USB_USBIO
tristate "Intel USBIO Bridge support"
depends on USB && ACPI
+ depends on X86 || COMPILE_TEST
select AUXILIARY_BUS
help
This adds support for Intel USBIO drivers.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 62e984d20e59..5de856f65f0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM05CN 0x0312
#define QUECTEL_PRODUCT_EM05G_GR 0x0313
#define QUECTEL_PRODUCT_EM05G_RS 0x0314
+#define QUECTEL_PRODUCT_RG255C 0x0316
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
@@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
+#define UNISOC_PRODUCT_UIS7720 0x4064
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
@@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
@@ -2466,6 +2475,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index b2a568a5bc9b..cc78770509db 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7876,9 +7876,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
- port->role_sw = usb_role_switch_get(port->dev);
+ port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
if (!port->role_sw)
- port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
+ port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
goto out_destroy_wq;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 82034efb74fc..a7936bd1aabe 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -573,6 +573,8 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
vcq->mcq.set_ci_db = vcq->db.db;
vcq->mcq.arm_db = vcq->db.db + 1;
vcq->mcq.cqe_sz = 64;
+ vcq->mcq.comp = mlx5_vdpa_cq_comp;
+ vcq->cqe = num_ent;
err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
if (err)
@@ -612,10 +614,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
if (err)
goto err_vec;
- vcq->mcq.comp = mlx5_vdpa_cq_comp;
- vcq->cqe = num_ent;
- vcq->mcq.set_ci_db = vcq->db.db;
- vcq->mcq.arm_db = vcq->db.db + 1;
mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
kfree(in);
return 0;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 916cad80941c..5167bec14e36 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,7 @@
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/mm_inline.h>
+#include <linux/overflow.h>
#include "vfio.h"
#define DRIVER_VERSION "0.2"
@@ -167,12 +168,14 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
{
struct rb_node *node = iommu->dma_list.rb_node;
+ WARN_ON(!size);
+
while (node) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
- if (start + size <= dma->iova)
+ if (start + size - 1 < dma->iova)
node = node->rb_left;
- else if (start >= dma->iova + dma->size)
+ else if (start > dma->iova + dma->size - 1)
node = node->rb_right;
else
return dma;
@@ -182,16 +185,19 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
}
static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
- dma_addr_t start, u64 size)
+ dma_addr_t start,
+ dma_addr_t end)
{
struct rb_node *res = NULL;
struct rb_node *node = iommu->dma_list.rb_node;
struct vfio_dma *dma_res = NULL;
+ WARN_ON(end < start);
+
while (node) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
- if (start < dma->iova + dma->size) {
+ if (start <= dma->iova + dma->size - 1) {
res = node;
dma_res = dma;
if (start >= dma->iova)
@@ -201,7 +207,7 @@ static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
node = node->rb_right;
}
}
- if (res && size && dma_res->iova >= start + size)
+ if (res && dma_res->iova > end)
res = NULL;
return res;
}
@@ -211,11 +217,13 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
struct vfio_dma *dma;
+ WARN_ON(new->size != 0);
+
while (*link) {
parent = *link;
dma = rb_entry(parent, struct vfio_dma, node);
- if (new->iova + new->size <= dma->iova)
+ if (new->iova <= dma->iova)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
@@ -895,14 +903,20 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
unsigned long remote_vaddr;
struct vfio_dma *dma;
bool do_accounting;
+ dma_addr_t iova_end;
+ size_t iova_size;
- if (!iommu || !pages)
+ if (!iommu || !pages || npage <= 0)
return -EINVAL;
/* Supported for v2 version only */
if (!iommu->v2)
return -EACCES;
+ if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
+ check_add_overflow(user_iova, iova_size - 1, &iova_end))
+ return -EOVERFLOW;
+
mutex_lock(&iommu->lock);
if (WARN_ONCE(iommu->vaddr_invalid_count,
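
The pin_pages hunk above validates the requested IOVA range with check_mul_overflow()/check_add_overflow(). A condensed sketch of that validation under the same assumption (the helpers return true on overflow and write the wrapped value to the destination); the function name is hypothetical:

#include <linux/mm.h>
#include <linux/overflow.h>

/* Compute the inclusive end of an npage-long IOVA range, failing with
 * -EOVERFLOW if either the byte count or the end address wraps.
 */
static int example_iova_range_end(dma_addr_t iova, long npage, dma_addr_t *end)
{
	size_t bytes;

	if (check_mul_overflow((size_t)npage, PAGE_SIZE, &bytes) ||
	    check_add_overflow(iova, bytes - 1, end))
		return -EOVERFLOW;

	return 0;
}
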
@@ -1008,12 +1022,21 @@ static void vfio_iommu_type1_unpin_pages(void *iommu_data,
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
+ dma_addr_t iova_end;
+ size_t iova_size;
int i;
/* Supported for v2 version only */
if (WARN_ON(!iommu->v2))
return;
+ if (WARN_ON(npage <= 0))
+ return;
+
+ if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
+ check_add_overflow(user_iova, iova_size - 1, &iova_end)))
+ return;
+
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
@@ -1067,7 +1090,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
#define VFIO_IOMMU_TLB_SYNC_MAX 512
static size_t unmap_unpin_fast(struct vfio_domain *domain,
- struct vfio_dma *dma, dma_addr_t *iova,
+ struct vfio_dma *dma, dma_addr_t iova,
size_t len, phys_addr_t phys, long *unlocked,
struct list_head *unmapped_list,
int *unmapped_cnt,
@@ -1077,18 +1100,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
- unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+ unmapped = iommu_unmap_fast(domain->domain, iova, len,
iotlb_gather);
if (!unmapped) {
kfree(entry);
} else {
- entry->iova = *iova;
+ entry->iova = iova;
entry->phys = phys;
entry->len = unmapped;
list_add_tail(&entry->list, unmapped_list);
- *iova += unmapped;
(*unmapped_cnt)++;
}
}
@@ -1107,18 +1129,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
}
static size_t unmap_unpin_slow(struct vfio_domain *domain,
- struct vfio_dma *dma, dma_addr_t *iova,
+ struct vfio_dma *dma, dma_addr_t iova,
size_t len, phys_addr_t phys,
long *unlocked)
{
- size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+ size_t unmapped = iommu_unmap(domain->domain, iova, len);
if (unmapped) {
- *unlocked += vfio_unpin_pages_remote(dma, *iova,
+ *unlocked += vfio_unpin_pages_remote(dma, iova,
phys >> PAGE_SHIFT,
unmapped >> PAGE_SHIFT,
false);
- *iova += unmapped;
cond_resched();
}
return unmapped;
@@ -1127,12 +1148,12 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
bool do_accounting)
{
- dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
LIST_HEAD(unmapped_region_list);
struct iommu_iotlb_gather iotlb_gather;
int unmapped_region_cnt = 0;
long unlocked = 0;
+ size_t pos = 0;
if (!dma->size)
return 0;
@@ -1156,13 +1177,14 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
}
iommu_iotlb_gather_init(&iotlb_gather);
- while (iova < end) {
+ while (pos < dma->size) {
size_t unmapped, len;
phys_addr_t phys, next;
+ dma_addr_t iova = dma->iova + pos;
phys = iommu_iova_to_phys(domain->domain, iova);
if (WARN_ON(!phys)) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
@@ -1171,7 +1193,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* may require hardware cache flushing, try to find the
* largest contiguous physical memory chunk to unmap.
*/
- for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
+ for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) {
next = iommu_iova_to_phys(domain->domain, iova + len);
if (next != phys + len)
break;
@@ -1181,16 +1203,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* First, try to use fast unmap/unpin. In case of failure,
* switch to slow unmap/unpin path.
*/
- unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+ unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,
&unlocked, &unmapped_region_list,
&unmapped_region_cnt,
&iotlb_gather);
if (!unmapped) {
- unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+ unmapped = unmap_unpin_slow(domain, dma, iova, len,
phys, &unlocked);
if (WARN_ON(!unmapped))
break;
}
+
+ pos += unmapped;
}
dma->iommu_mapped = false;
@@ -1282,7 +1306,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
}
static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
- dma_addr_t iova, size_t size, size_t pgsize)
+ dma_addr_t iova, dma_addr_t iova_end, size_t pgsize)
{
struct vfio_dma *dma;
struct rb_node *n;
@@ -1299,8 +1323,8 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
if (dma && dma->iova != iova)
return -EINVAL;
- dma = vfio_find_dma(iommu, iova + size - 1, 0);
- if (dma && dma->iova + dma->size != iova + size)
+ dma = vfio_find_dma(iommu, iova_end, 1);
+ if (dma && dma->iova + dma->size - 1 != iova_end)
return -EINVAL;
for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
@@ -1309,7 +1333,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
if (dma->iova < iova)
continue;
- if (dma->iova > iova + size - 1)
+ if (dma->iova > iova_end)
break;
ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
@@ -1374,7 +1398,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
int ret = -EINVAL, retries = 0;
unsigned long pgshift;
dma_addr_t iova = unmap->iova;
- u64 size = unmap->size;
+ dma_addr_t iova_end;
+ size_t size = unmap->size;
bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
struct rb_node *n, *first_n;
@@ -1387,6 +1412,11 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
goto unlock;
}
+ if (iova != unmap->iova || size != unmap->size) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
+
pgshift = __ffs(iommu->pgsize_bitmap);
pgsize = (size_t)1 << pgshift;
@@ -1396,10 +1426,15 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
if (unmap_all) {
if (iova || size)
goto unlock;
- size = U64_MAX;
- } else if (!size || size & (pgsize - 1) ||
- iova + size - 1 < iova || size > SIZE_MAX) {
- goto unlock;
+ iova_end = ~(dma_addr_t)0;
+ } else {
+ if (!size || size & (pgsize - 1))
+ goto unlock;
+
+ if (check_add_overflow(iova, size - 1, &iova_end)) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
}
/* When dirty tracking is enabled, allow only min supported pgsize */
@@ -1446,17 +1481,17 @@ again:
if (dma && dma->iova != iova)
goto unlock;
- dma = vfio_find_dma(iommu, iova + size - 1, 0);
- if (dma && dma->iova + dma->size != iova + size)
+ dma = vfio_find_dma(iommu, iova_end, 1);
+ if (dma && dma->iova + dma->size - 1 != iova_end)
goto unlock;
}
ret = 0;
- n = first_n = vfio_find_dma_first_node(iommu, iova, size);
+ n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end);
while (n) {
dma = rb_entry(n, struct vfio_dma, node);
- if (dma->iova >= iova + size)
+ if (dma->iova > iova_end)
break;
if (!iommu->v2 && iova > dma->iova)
@@ -1648,7 +1683,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
{
bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
dma_addr_t iova = map->iova;
+ dma_addr_t iova_end;
unsigned long vaddr = map->vaddr;
+ unsigned long vaddr_end;
size_t size = map->size;
int ret = 0, prot = 0;
size_t pgsize;
@@ -1656,8 +1693,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
/* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+ return -EOVERFLOW;
+
+ if (!size)
return -EINVAL;
+ if (check_add_overflow(iova, size - 1, &iova_end) ||
+ check_add_overflow(vaddr, size - 1, &vaddr_end))
+ return -EOVERFLOW;
+
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
prot |= IOMMU_WRITE;
@@ -1673,13 +1717,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
WARN_ON((pgsize - 1) & PAGE_MASK);
- if (!size || (size | iova | vaddr) & (pgsize - 1)) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- /* Don't allow IOVA or virtual address wrap */
- if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+ if ((size | iova | vaddr) & (pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1710,7 +1748,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
- if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
+ if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1783,12 +1821,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
for (; n; n = rb_next(n)) {
struct vfio_dma *dma;
- dma_addr_t iova;
+ size_t pos = 0;
dma = rb_entry(n, struct vfio_dma, node);
- iova = dma->iova;
- while (iova < dma->iova + dma->size) {
+ while (pos < dma->size) {
+ dma_addr_t iova = dma->iova + pos;
phys_addr_t phys;
size_t size;
@@ -1804,14 +1842,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
phys = iommu_iova_to_phys(d->domain, iova);
if (WARN_ON(!phys)) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
size = PAGE_SIZE;
p = phys + size;
i = iova + size;
- while (i < dma->iova + dma->size &&
+ while (pos + size < dma->size &&
p == iommu_iova_to_phys(d->domain, i)) {
size += PAGE_SIZE;
p += PAGE_SIZE;
@@ -1819,9 +1857,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
} else {
unsigned long pfn;
- unsigned long vaddr = dma->vaddr +
- (iova - dma->iova);
- size_t n = dma->iova + dma->size - iova;
+ unsigned long vaddr = dma->vaddr + pos;
+ size_t n = dma->size - pos;
long npage;
npage = vfio_pin_pages_remote(dma, vaddr,
@@ -1852,7 +1889,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
goto unwind;
}
- iova += size;
+ pos += size;
}
}
@@ -1869,29 +1906,29 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
unwind:
for (; n; n = rb_prev(n)) {
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
- dma_addr_t iova;
+ size_t pos = 0;
if (dma->iommu_mapped) {
iommu_unmap(domain->domain, dma->iova, dma->size);
continue;
}
- iova = dma->iova;
- while (iova < dma->iova + dma->size) {
+ while (pos < dma->size) {
+ dma_addr_t iova = dma->iova + pos;
phys_addr_t phys, p;
size_t size;
dma_addr_t i;
phys = iommu_iova_to_phys(domain->domain, iova);
if (!phys) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
size = PAGE_SIZE;
p = phys + size;
i = iova + size;
- while (i < dma->iova + dma->size &&
+ while (pos + size < dma->size &&
p == iommu_iova_to_phys(domain->domain, i)) {
size += PAGE_SIZE;
p += PAGE_SIZE;
@@ -2977,7 +3014,8 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dirty_bitmap_get range;
unsigned long pgshift;
size_t data_size = dirty.argsz - minsz;
- size_t iommu_pgsize;
+ size_t size, iommu_pgsize;
+ dma_addr_t iova, iova_end;
if (!data_size || data_size < sizeof(range))
return -EINVAL;
@@ -2986,14 +3024,24 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
sizeof(range)))
return -EFAULT;
- if (range.iova + range.size < range.iova)
+ iova = range.iova;
+ size = range.size;
+
+ if (iova != range.iova || size != range.size)
+ return -EOVERFLOW;
+
+ if (!size)
return -EINVAL;
+
+ if (check_add_overflow(iova, size - 1, &iova_end))
+ return -EOVERFLOW;
+
if (!access_ok((void __user *)range.bitmap.data,
range.bitmap.size))
return -EINVAL;
pgshift = __ffs(range.bitmap.pgsize);
- ret = verify_bitmap_size(range.size >> pgshift,
+ ret = verify_bitmap_size(size >> pgshift,
range.bitmap.size);
if (ret)
return ret;
@@ -3007,19 +3055,18 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
ret = -EINVAL;
goto out_unlock;
}
- if (range.iova & (iommu_pgsize - 1)) {
+ if (iova & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
- if (!range.size || range.size & (iommu_pgsize - 1)) {
+ if (size & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (iommu->dirty_page_tracking)
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
- iommu, range.iova,
- range.size,
+ iommu, iova, iova_end,
range.bitmap.pgsize);
else
ret = -EINVAL;
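The vfio_iommu_type1 hunks above consistently replace open-coded wrap checks of the form "iova + size - 1 < iova" with check_mul_overflow()/check_add_overflow() and an inclusive iova_end, returning -EOVERFLOW on wrap. As a minimal standalone sketch of that pattern (not the driver API itself), the C below uses the GCC/Clang builtins that back the kernel's check_*_overflow() helpers on current compilers; all names here are hypothetical and only mirror the shape of the code above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Compute an inclusive end address for [iova, iova + npage * PAGE_SIZE - 1],
 * failing instead of silently wrapping, as the vfio hunks now do. */
static int demo_range_end(uint64_t iova, long npage, uint64_t *iova_end)
{
	size_t iova_size;

	if (npage <= 0)
		return -1;	/* stands in for -EINVAL */

	if (__builtin_mul_overflow((unsigned long)npage, DEMO_PAGE_SIZE, &iova_size) ||
	    __builtin_add_overflow(iova, iova_size - 1, iova_end))
		return -2;	/* stands in for -EOVERFLOW */

	return 0;
}

int main(void)
{
	uint64_t end;

	/* Normal range: 16 pages starting at 0x1000. */
	if (demo_range_end(0x1000, 16, &end) == 0)
		printf("end = 0x%llx\n", (unsigned long long)end);

	/* Range that would wrap past the top of the address space. */
	if (demo_range_end(UINT64_MAX - DEMO_PAGE_SIZE, 4, &end) != 0)
		printf("rejected wrapping range\n");

	return 0;
}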
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 210fd3ac18a4..56ef1d88e003 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2614,8 +2614,12 @@ static int aty_init(struct fb_info *info)
pr_cont("\n");
}
#endif
- if (par->pll_ops->init_pll)
- par->pll_ops->init_pll(info, &par->pll);
+ if (par->pll_ops->init_pll) {
+ ret = par->pll_ops->init_pll(info, &par->pll);
+ if (ret)
+ return ret;
+ }
+
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
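The atyfb hunk above stops discarding the return value of the optional init_pll op and propagates its failure. A small standalone sketch of that "optional callback, propagate failure" shape follows; the struct and function names are hypothetical stand-ins, not the atyfb types.

#include <stdio.h>

/* Hypothetical ops table standing in for the PLL ops; only the shape matters. */
struct demo_pll_ops {
	int (*init_pll)(void *info, void *pll);		/* optional, may be NULL */
	void (*resume_pll)(void *info, void *pll);	/* optional, may be NULL */
};

static int demo_init(const struct demo_pll_ops *ops, void *info, void *pll)
{
	int ret;

	/* Optional hook: call it only if present, and propagate its failure
	 * instead of silently continuing with a half-initialised PLL. */
	if (ops->init_pll) {
		ret = ops->init_pll(info, pll);
		if (ret)
			return ret;
	}

	if (ops->resume_pll)
		ops->resume_pll(info, pll);

	return 0;
}

static int failing_init_pll(void *info, void *pll)
{
	(void)info; (void)pll;
	return -5;	/* stands in for -EIO */
}

int main(void)
{
	const struct demo_pll_ops ops = { .init_pll = failing_init_pll };

	printf("demo_init() = %d\n", demo_init(&ops, NULL, NULL));
	return 0;
}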
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index a9ec7f488522..dc5ad3fcc7be 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -79,12 +79,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,
struct fb_image *image, u8 *buf, u8 *dst)
{
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ unsigned int charcnt = vc->vc_font.charcount;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
+ u16 ch = scr_readw(s++) & charmask;
+
+ if (ch >= charcnt)
+ ch = 0;
+ src = vc->vc_font.data + (unsigned int)ch * cellsize;
if (attr) {
update_attr(buf, src, attr, vc);
@@ -112,14 +116,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc,
u8 *dst)
{
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ unsigned int charcnt = vc->vc_font.charcount;
u32 shift_low = 0, mod = vc->vc_font.width % 8;
u32 shift_high = 8;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
+ u16 ch = scr_readw(s++) & charmask;
+
+ if (ch >= charcnt)
+ ch = 0;
+ src = vc->vc_font.data + (unsigned int)ch * cellsize;
if (attr) {
update_attr(buf, src, attr, vc);
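Both bitblit hunks clamp the (masked) character code to the font's charcount before using it as an index into vc_font.data, so a code past the end of the font falls back to glyph 0 instead of reading out of bounds. The standalone sketch below shows that bounds check with hypothetical names; it is not the fbcon data model, only the same clamp.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a console font: 'charcount' glyphs of
 * 'cellsize' bytes each, stored back to back in 'data'. */
struct demo_font {
	const uint8_t *data;
	unsigned int charcount;
	unsigned int cellsize;
};

/* Return a pointer to the glyph for 'ch', falling back to glyph 0 when the
 * masked character code lies outside the font, as the hunks above do. */
static const uint8_t *demo_glyph(const struct demo_font *font,
				 uint16_t ch, uint16_t charmask)
{
	ch &= charmask;
	if (ch >= font->charcount)
		ch = 0;
	return font->data + (unsigned int)ch * font->cellsize;
}

int main(void)
{
	static uint8_t glyphs[256 * 16];
	struct demo_font font = {
		.data = glyphs,
		.charcount = 256,
		.cellsize = 16,
	};

	memset(glyphs, 0, sizeof(glyphs));

	/* Code 0x1ff against a 256-glyph font would index past the end
	 * without the clamp; with it, glyph 0 is used instead. */
	printf("offset = %td\n", demo_glyph(&font, 0x1ff, 0x1ff) - glyphs);
	return 0;
}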
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 96cc9b389246..9bd3c3814b5c 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2810,6 +2810,25 @@ int fbcon_mode_deleted(struct fb_info *info,
return found;
}
+static void fbcon_delete_mode(struct fb_videomode *m)
+{
+ struct fbcon_display *p;
+
+ for (int i = first_fb_vc; i <= last_fb_vc; i++) {
+ p = &fb_display[i];
+ if (p->mode == m)
+ p->mode = NULL;
+ }
+}
+
+void fbcon_delete_modelist(struct list_head *head)
+{
+ struct fb_modelist *modelist;
+
+ list_for_each_entry(modelist, head, list)
+ fbcon_delete_mode(&modelist->mode);
+}
+
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static void fbcon_unbind(void)
{
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 53f1719b1ae1..eff757ebbed1 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -544,6 +544,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
fb_info->pixmap.addr = NULL;
}
+ fbcon_delete_modelist(&fb_info->modelist);
fb_destroy_modelist(&fb_info->modelist);
registered_fb[fb_info->node] = NULL;
num_registered_fb--;
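The fbcon/fbmem hunks clear any fb_display[i].mode pointer that refers into the framebuffer's modelist before fb_destroy_modelist() frees the list, so no console display is left holding a dangling pointer. The sketch below illustrates that "invalidate back-references, then free" ordering in a self-contained form; the structures and names are hypothetical and only mirror the relationship, not the fbcon code.

#include <stdio.h>
#include <stdlib.h>

struct demo_mode {
	int xres, yres;
};

struct demo_display {
	const struct demo_mode *mode;	/* may point into the shared mode list */
};

#define DEMO_NR_DISPLAYS 4

static struct demo_display demo_displays[DEMO_NR_DISPLAYS];

/* Step 1: drop every display reference into the list about to be freed,
 * mirroring fbcon_delete_modelist()/fbcon_delete_mode(). */
static void demo_delete_modelist(const struct demo_mode *modes, size_t count)
{
	for (size_t m = 0; m < count; m++)
		for (size_t i = 0; i < DEMO_NR_DISPLAYS; i++)
			if (demo_displays[i].mode == &modes[m])
				demo_displays[i].mode = NULL;
}

int main(void)
{
	size_t count = 2;
	struct demo_mode *modes = calloc(count, sizeof(*modes));

	if (!modes)
		return 1;

	demo_displays[1].mode = &modes[0];

	/* Clear back-references first, then free the list itself, matching
	 * the fbcon_delete_modelist() + fb_destroy_modelist() ordering. */
	demo_delete_modelist(modes, count);
	free(modes);

	printf("display 1 mode: %p\n", (void *)demo_displays[1].mode);
	return 0;
}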
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index cbdb1caf61bd..0b8d23c12b77 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -192,7 +192,7 @@ static unsigned long pvr2fb_map;
#ifdef CONFIG_PVR2_DMA
static unsigned int shdma = PVR2_CASCADE_CHAN;
-static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
+static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS;
#endif
static struct fb_videomode pvr2_modedb[] = {
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 91d070ef6989..6ff059ee1694 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -329,11 +329,13 @@ static int __init valkyriefb_init(void)
if (of_address_to_resource(dp, 0, &r)) {
printk(KERN_ERR "can't find address for valkyrie\n");
+ of_node_put(dp);
return 0;
}
frame_buffer_phys = r.start;
cmap_regs_phys = r.start + 0x304000;
+ of_node_put(dp);
}
#endif /* ppc (!CONFIG_MAC) */
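The valkyriefb hunk adds of_node_put() on both the early-return and the success path, so the device-node reference obtained earlier is dropped on every exit. The generic sketch below shows that get/put balancing with a hypothetical refcounted handle standing in for struct device_node; it does not use the real OF API.

#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	int refcount;
};

static struct demo_node *demo_node_get(struct demo_node *np)
{
	if (np)
		np->refcount++;
	return np;
}

static void demo_node_put(struct demo_node *np)
{
	if (np && --np->refcount == 0)
		free(np);
}

/* Stand-in for an address lookup that can fail. */
static int demo_lookup_address(struct demo_node *np, unsigned long *addr)
{
	(void)np;
	*addr = 0xf3000000UL;
	return 0;
}

static int demo_init(struct demo_node *dp)
{
	unsigned long addr;

	demo_node_get(dp);

	if (demo_lookup_address(dp, &addr)) {
		/* Error path: drop the reference before bailing out,
		 * as the valkyriefb hunk now does. */
		demo_node_put(dp);
		return 0;
	}

	printf("frame buffer at %#lx\n", addr);

	/* Success path: the reference is no longer needed either. */
	demo_node_put(dp);
	return 0;
}

int main(void)
{
	struct demo_node *dp = calloc(1, sizeof(*dp));

	if (!dp)
		return 1;
	dp->refcount = 1;

	demo_init(dp);
	demo_node_put(dp);	/* drop the caller's own reference */
	return 0;
}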