| author | Martin K. Petersen <martin.petersen@oracle.com> | 2025-11-12 21:30:23 -0500 |
|---|---|---|
| committer | Martin K. Petersen <martin.petersen@oracle.com> | 2025-11-12 21:30:23 -0500 |
| commit | e360bb6dc843ffa59283705dd167478ea317567c | |
| tree | 9eae454006b386d10e444ada13f01634975e00a4 /drivers/target | |
| parent | 3813d28b2b12dea8f44a6828b9c30b38208d3fc5 | |
| parent | 8d5cad38cf7da7848a2f4d7ca5adb4110b2cd968 | |
Merge patch series "replace old wq(s), added WQ_PERCPU to alloc_workqueue"
Marco Crivellari <marco.crivellari@suse.com> says:
Hi,
=== Current situation: problems ===
Let's consider a nohz_full system with isolated CPUs: wq_unbound_cpumask is
set to the housekeeping CPUs, while for !WQ_UNBOUND workqueues the local CPU
is selected. This leads to different scenarios when a work item is scheduled
on an isolated CPU, depending on whether the "delay" value is 0 or greater
than 0:
schedule_delayed_work(, 0);
This is handled by __queue_work(), which queues the work item on the
current local (isolated) CPU, while:
schedule_delayed_work(, 1);
will move the timer to a housekeeping CPU and schedule the work there.
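A minimal sketch of the two cases above (the handler and work item names
are hypothetical, not part of this series):

  #include <linux/workqueue.h>

  static void my_handler(struct work_struct *work)
  {
  	/* ... work body ... */
  }
  static DECLARE_DELAYED_WORK(my_work, my_handler);

  static void example(void)	/* assume this runs on an isolated CPU */
  {
  	/* delay == 0: __queue_work() queues on the current (isolated) CPU */
  	schedule_delayed_work(&my_work, 0);

  	/* delay > 0: the timer is moved to a housekeeping CPU and the
  	 * work runs there */
  	schedule_delayed_work(&my_work, 1);
  }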
Currently, if a user enqueues a work item using schedule_delayed_work(), the
wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, and to queue_work(), which again
makes use of WORK_CPU_UNBOUND.
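For reference, these wrappers look roughly like the following (paraphrased
from include/linux/workqueue.h):

  static inline bool schedule_work(struct work_struct *work)
  {
  	return queue_work(system_wq, work);
  }

  static inline bool schedule_delayed_work(struct delayed_work *dwork,
  					   unsigned long delay)
  {
  	return queue_delayed_work(system_wq, dwork, delay);
  }

  /* queue_work() leaves CPU selection to the workqueue core: */
  static inline bool queue_work(struct workqueue_struct *wq,
  			      struct work_struct *work)
  {
  	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
  }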
This lack of consistency cannot be addressed without refactoring the API.
=== Recent changes to the WQ API ===
The following commits introduced the recent changes to the workqueue API:
- commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
- commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
The old workqueues will be removed in a future release cycle.
=== Introduced Changes by this series ===
1) [P 1] Replace uses of system_wq and system_unbound_wq
system_unbound_wq is meant for work where locality is not required;
it has been replaced with system_dfl_wq to make that intent explicit.
2) [P 2-3-4] WQ_PERCPU added to alloc_workqueue()
This change adds a new WQ_PERCPU flag to explicitly request a per-cpu
workqueue from alloc_workqueue() when WQ_UNBOUND has not been specified
(see the conversion sketch after this list).
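As a rough illustration, the conversion pattern applied throughout
drivers/target is (the "example_wq" name is illustrative, not taken from
the patches):

  -	queue_work(system_unbound_wq, &work);	/* locality not required */
  +	queue_work(system_dfl_wq, &work);

  -	wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
  +	wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);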
Thanks!
Link: https://patch.msgid.link/20251031095643.74246-1-marco.crivellari@suse.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/target')
| -rw-r--r-- | drivers/target/sbp/sbp_target.c | 8 |
| -rw-r--r-- | drivers/target/target_core_transport.c | 4 |
| -rw-r--r-- | drivers/target/target_core_xcopy.c | 2 |
| -rw-r--r-- | drivers/target/tcm_fc/tfc_conf.c | 2 |
4 files changed, 8 insertions, 8 deletions
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3b89b5a70331..b8457477cee9 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -730,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 	pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", agent->orb_pointer);
 
-	queue_work(system_unbound_wq, &agent->work);
+	queue_work(system_dfl_wq, &agent->work);
 
 	return RCODE_COMPLETE;
@@ -764,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 	pr_debug("tgt_agent DOORBELL\n");
 
-	queue_work(system_unbound_wq, &agent->work);
+	queue_work(system_dfl_wq, &agent->work);
 
 	return RCODE_COMPLETE;
@@ -990,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
 	if (tgt_agent_check_active(agent) && !doorbell) {
 		INIT_WORK(&req->work, tgt_agent_process_work);
-		queue_work(system_unbound_wq, &req->work);
+		queue_work(system_dfl_wq, &req->work);
 	} else {
 		/* don't process this request, just check next_ORB */
 		sbp_free_request(req);
@@ -1618,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
 		agent->orb_offset = sbp2_pointer_to_addr(ptr);
 		agent->request = req;
 
-		queue_work(system_unbound_wq, &agent->work);
+		queue_work(system_dfl_wq, &agent->work);
 		rcode = RCODE_COMPLETE;
 	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
 		addr_to_sbp2_pointer(agent->orb_offset, ptr);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index fca9b44288bc..e8b7955d40f2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
 	}
 
 	target_completion_wq = alloc_workqueue("target_completion",
-					       WQ_MEM_RECLAIM, 0);
+					       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!target_completion_wq)
 		goto out_free_lba_map_mem_cache;
 
 	target_submission_wq = alloc_workqueue("target_submission",
-					       WQ_MEM_RECLAIM, 0);
+					       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!target_submission_wq)
 		goto out_free_completion_wq;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 877ce58c0a70..93534a6e14b7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
 
 int target_xcopy_setup_pt(void)
 {
-	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!xcopy_wq) {
 		pr_err("Unable to allocate xcopy_wq\n");
 		return -ENOMEM;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 639fc358ed0f..f686d95d3273 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
 	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
-	wq = alloc_workqueue("tcm_fc", 0, 1);
+	wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
 	if (!wq) {
 		kfree(tpg);
 		return NULL;